Column summary:

Column           Type       Values
query            string     lengths 9 to 3.4k
document         string     lengths 9 to 87.4k
metadata         dict       n/a
negatives        sequence   lengths 4 to 101
negative_scores  sequence   lengths 4 to 101
document_score   string     lengths 3 to 10
document_rank    string     102 distinct values
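For orientation, here is a minimal sketch of how a row with this schema could be expanded into training triplets, matching the ``triplet`` objective declared in each row's ``metadata`` field. The repository id and the use of the ``datasets`` library are assumptions for illustration, not part of the dump.

```python
from datasets import load_dataset

# Hypothetical repository id; substitute the actual dataset path.
ds = load_dataset("org/code-retrieval-triplets", split="train")

def to_triplets(row):
    """Expand one row into (anchor, positive, negative) triplets,
    following the objective declared in `metadata`:
    "triplet": [["query", "document", "negatives"]]."""
    query = row["query"]
    positive = row["document"]
    return [(query, positive, neg) for neg in row["negatives"]]

triplets = [t for row in ds for t in to_triplets(row)]
```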
Calculate the number of columns and rows required to divide an image into ``n`` parts. Return a tuple of integers in the format (num_columns, num_rows).
def calc_columns_rows(n):
    # Requires: from math import ceil, sqrt
    num_columns = int(ceil(sqrt(n)))
    num_rows = int(ceil(n / float(num_columns)))
    return (num_columns, num_rows)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_rows_columns(num_wells):\n a = math.sqrt(num_wells / 6)\n n_rows = int(round(2 * a))\n n_columns = int(round(3 * a))\n return n_rows, n_columns", "def compute_nrows_ncolumns(nplots):\n n_rows = int(np.sqrt(nplots)) + (np.sqrt(nplots) != int(np.sqrt(nplots))) * 1\n n_columns = int(nplots / n_rows) + (nplots / n_rows != int(nplots / n_rows)) * 1\n return n_rows, n_columns", "def calculate_grid_dimensions(num_items, num_columns=None):\n if num_columns is None:\n num_rows_columns = int(math.ceil(math.sqrt(num_items)))\n return num_rows_columns, num_rows_columns\n else:\n num_rows = int(math.ceil(num_items / num_columns))\n return num_rows, num_columns", "def _num_extracted_rows_and_columns(\n image_size: int,\n patch_size: int,\n stride: int,\n num_scales: int,\n scale_factor: int,\n) -> int:\n largest_patch_size = int(patch_size * (scale_factor**(num_scales - 1)))\n residual = image_size - largest_patch_size\n return (residual // stride) + 1", "def get_num_tiles(rows, cols, row_tile_size, col_tile_size):\n num_row_tiles = math.ceil(rows / row_tile_size)\n num_col_tiles = math.ceil(cols / col_tile_size)\n return num_row_tiles, num_col_tiles", "def getLayoutDimensions(n, pref=\"height\"):\n nopt = np.sqrt(n)\n inoptw = int(nopt)\n inopth = int(nopt)\n while inoptw * inopth < n:\n if pref == \"width\":\n inoptw += 1\n if inoptw * inopth > (n - inopth):\n inoptw -= 1\n inopth += 1\n else:\n inopth += 1\n if inoptw * inopth > (n - inoptw):\n inopth -= 1\n inoptw += 1\n\n return (inopth, inoptw)", "def _get_number_of_rows_to_process(self, bitsPerPixel):\n # TODO: do a better job estimating the number of rows to process.\n # Compute the number of pixels that fit under the memory limit.\n memLimit = (psutil.virtual_memory().available/\n (bitsPerPixel*(1024**2)))\n memLimit = int(50*np.floor(memLimit/10.0))\n numStackPix = memLimit*(1024**2)*8/bitsPerPixel\n\n # Grab the number of images and the shape of those image\n numImg, ny, nx = self.shape\n\n # Compute the number of rows to be processed in each chunk\n numRows = int(np.floor(numStackPix/(numImg*nx)))\n\n # Catch the case where ALL rows get handled at once\n if numRows > ny: numRows = ny\n numSections = int(np.ceil(ny/numRows))\n\n # Recompute the number of rows to be evenly spaced\n numRows = int(np.ceil(ny/numSections))\n\n return numRows, numSections", "def get_grid_size(self, img):\r\n grid_height = int(np.ceil(img.shape[0] / self.config.grid_row))\r\n grid_width = int(np.ceil(img.shape[1] / self.config.grid_col))\r\n return grid_height, grid_width", "def _get_split_sizes(self, n_examples):\n\n min_ex = (int(n_examples // self.n_splits)\n * np.ones(self.n_splits, dtype=np.int8))\n \n rem = np.array(\n [1 if i < n_examples % self.n_splits else 0\n for i in range(self.n_splits)],\n dtype=np.int8)\n\n return np.add(min_ex, rem)", "def number_of_patches(width, height, patch_size):\n n_patches_x = width // patch_size\n n_patches_y = height // patch_size\n return n_patches_x, n_patches_y", "def infer_ncols_nrows(n_subplots, ncols, nrows, max_ncols, **kwargs):\n _ = kwargs\n\n # Make ncols/nrows\n if ncols is None and nrows is None:\n ncols = min(max_ncols, n_subplots)\n nrows = ceil_div(n_subplots, ncols)\n elif ncols is None:\n ncols = ceil_div(n_subplots, nrows)\n elif nrows is None:\n nrows = ceil_div(n_subplots, ncols)\n\n return ncols, nrows", "def get_number_of_rows_and_columns(m):\n\n r = int(np.sqrt(m))\n c = m // r if np.mod(m, r) == 0 else m // r + 1\n return r, c", "def getNumTiles(self):\n return 
len(list(product(list(range(self.width+1))[1:], list(range(self.height+1))[1:])))", "def _get_dimensions(self):\n corners = []\n for module in self.modules:\n for tile in module:\n corners.append(tile.corners())\n corners = np.concatenate(corners)[:, :2] / self._pixel_shape\n\n # Find extremes, add 1 px margin to allow for rounding errors\n min_xy = corners.min(axis=0).astype(int) - 1\n max_xy = corners.max(axis=0).astype(int) + 1\n\n size = max_xy - min_xy\n centre = -min_xy\n # Switch xy -> yx\n return tuple(size[::-1]), centre[::-1]", "def row_count(self):\n return self.well_count // self.col_count", "def _get_dimensions(self):\n corners = []\n for module in self.modules:\n for tile in module:\n corners.append(tile.corner_idx)\n corners.append(tile.opp_corner_idx)\n corners = np.stack(corners)\n\n # Find extremes\n min_yx = corners.min(axis=0)\n max_yx = corners.max(axis=0)\n\n size = max_yx - min_yx\n centre = -min_yx\n return tuple(size), centre", "def get_grid_shape(num_examples):\n height = int(numpy.floor(numpy.sqrt(num_examples)))\n width = int(numpy.ceil(num_examples * 1. / height))\n\n return (height, width)", "def get_num_chunks(self) -> int:", "def get_size(img):\n ih, iw = img.shape[:2]\n return iw * ih", "def _compute_rows_and_cols_corrected(n_rows: int, n_cols: int, confmat_sum: Tensor) ->Tuple[Tensor, Tensor]:\n rows_corrected = n_rows - (n_rows - 1) ** 2 / (confmat_sum - 1)\n cols_corrected = n_cols - (n_cols - 1) ** 2 / (confmat_sum - 1)\n return rows_corrected, cols_corrected", "def count_tilings(n: int) -> int:\n if n < 5:\n # handle recursive base case\n return 2**(n - 1)\n else:\n # place each tile at end of row and recurse on remainder\n return (count_tilings(n - 1) +\n count_tilings(n - 2) +\n count_tilings(n - 3) +\n count_tilings(n - 4))", "def count_divisions(num, n):\n count = 0\n while pe_005.is_divisible(num, n):\n num = num // n\n count += 1\n return count, num", "def output_image_size(n_patches_x, n_patches_y, patch_size):\n width = n_patches_x * patch_size\n height = n_patches_y * patch_size\n return width, height", "def getDimension(data):\r\n # open image for reading in binary mode\r\n\r\n # read the 2 bytes\r\n a = data[163:165]\r\n\r\n # calculate height\r\n height = (a[0] << 8) + a[1]\r\n\r\n # next 2 bytes is width\r\n a = data[165:167]\r\n\r\n # calculate width\r\n width = (a[0] << 8) + a[1]\r\n\r\n return (width, height)", "def findWidthHeight():\n\n for f in os.listdir(\"%s/train/images/\" % args.dataset):\n if f.endswith(\".jpeg\"):\n imf = \"%s/train/images/%s\" % (args.dataset, f)\n try:\n im = Image.open(imf)\n except:\n print \"Could not open training image %s to read its size.\" %imf\n usage()\n break\n \n width = int(im.size[0])\n height = int(im.size[1])\n \n nwidth = width\n nheight = height\n if args.width:\n nwidth = args.width\n if args.height:\n nheight = args.height\n\n return width, height, nwidth, nheight, not(width == nwidth and height == nheight)", "def get_dims(self):\n row_lbl, col_lbl = self.get_idxvals()\n return len(row_lbl), len(col_lbl)", "def voxel_count(self):\n return self.cols * self.rows * self.sections", "def getNumTiles(self):\n return self.w * self.h", "def _get_image_dimensions(self):\n\t\timageWidth = int(self.labels['IMAGE']['LINE_SAMPLES'])\n\t\timageHeight = int(self.labels['IMAGE']['LINES'])\n\t\treturn imageWidth, imageHeight", "def get_img_dims(img):\n height, width = img.shape[:2]\n return width, height", "def get_num_of_images(self):", "def get_image_size(frame) -> tuple:\n return 
tuple(frame.shape[1::-1])", "def get_mosaic_dimensions(self) -> tuple:\n n_rows = self.header.get(\"Rows\") // self.volume_shape[0]\n n_columns = self.header.get(\"Columns\") // self.volume_shape[1]\n return n_rows, n_columns", "def size(self):\n return self.num_rows, self.num_cols", "def size(self):\n return self.num_rows, self.num_cols", "def getNumTiles(self):\n return (self.width) * (self.height)", "def pixels_to_length(pixels, boxsize, num_pixels):\n length = (boxsize * pixels) / num_pixels\n\n return length", "def get_n_splits(self):\n return self.n_folds", "def columns(self) -> int:\n return self.__squares[0].__len__()", "def ncells(self):\n return self.izone.size", "def input_image_size(interpreter):\n _, height, width, channels = interpreter.get_input_details()[0]['shape']\n return width, height, channels", "def determine_size(self):\n size = np.inf\n while size >= self.n:\n size = np.random.pareto(0.2)\n size = int(math.ceil(size))\n return size", "def get_size(image):\n width, height = image.size\n\n return (width, height)", "def size(self):\n return self.__row_count * self.__col_count", "def num_tiles(self):\n return self.num_row_tiles * self.num_col_tiles", "def calc_n_patches(img_ref, patch_size):\n with Image.open(img_ref) as img:\n width, height = img.size\n\n line_patches = np.ceil(height / patch_size).astype(int)\n column_patches = np.ceil(width / patch_size).astype(int)\n num_of_patches = line_patches * column_patches\n return num_of_patches", "def get_n_splits(self):\n return self.n_splits", "def get_n_splits(self):\n return self.n_splits", "def get_n_splits(self):\n return self.n_splits", "def getNumTiles(self):\n return self.height * self.width", "def num_cells_for_rows(self, rows):\r\n return (rows * rows + rows) // 2", "def getNumTiles(self):\n #raise NotImplementedError #refer https://docs.python.org/2/library/exceptions.html\n return self.width * self.height", "def size_from_args(self):\n rows = self.args[0].size[0]*self.args[1].size[0]\n cols = self.args[0].size[1]*self.args[1].size[1]\n return (rows, cols)", "def getDims(img):\n n,m,k = np.shape(img) \n N,M = 0,0\n for i in range(1,n):\n if np.array_equal(img[i],img[i-1]):\n N += 1\n for j in range(1,m):\n if np.array_equal(img[:,j],img[:,j-1]):\n M += 1\n return N,M,n,m", "def get_chunk_size(N, n):\n\n mem_free = memory()['free']\n if mem_free > 60000000:\n chunks_size = int(((mem_free - 10000000) * 1000) / (4 * n * N))\n return chunks_size\n elif mem_free > 40000000:\n chunks_size = int(((mem_free - 7000000) * 1000) / (4 * n * N))\n return chunks_size\n elif mem_free > 14000000:\n chunks_size = int(((mem_free - 2000000) * 1000) / (4 * n * N))\n return chunks_size\n elif mem_free > 8000000:\n chunks_size = int(((mem_free - 1400000) * 1000) / (4 * n * N))\n return chunks_size\n elif mem_free > 2000000:\n chunks_size = int(((mem_free - 900000) * 1000) / (4 * n * N))\n return chunks_size\n elif mem_free > 1000000:\n chunks_size = int(((mem_free - 400000) * 1000) / (4 * n * N))\n return chunks_size\n else:\n raise MemoryError(\"\\nERROR: DBSCAN_multiplex @ get_chunk_size:\\n\"\n \"this machine does not have enough free memory \"\n \"to perform the remaining computations.\\n\")", "def CalcRowSize( self, bits, image_width ):\n ## The size of each row is rounded up to the nearest multiple of 4 bytes.\n rowsize = int(((bits * image_width + 31) // 32)) * 4\n return rowsize", "def get_num_pieces(self):\n return self.num_pieces", "def n_dims(self):\n return self.pdm.n_dims", "def plaquette_rows_cols(self):\n return 
self.nMinorRows, self.nMinorCols", "def get_number_rows(rk_settings, rock_height, star_height):\r\n\tavailable_space_y = (rk_settings.screen_height -\r\n\t\t\t\t\t\t(3 * star_height) - rock_height)\r\n\tnumber_rows = int(available_space_y / (2 * star_height))\r\n\treturn number_rows", "def compute_size(h, w, n):\n\n res = []\n for x in [h, w]:\n for i in range(n):\n x = compute_conv(x, 3, 1, 1)\n x = compute_pool(x)\n res.append(x)\n return res", "def get_img_output_length(width, height):\n def get_output_length(input_length):\n return input_length//16\n\n return get_output_length(width), get_output_length(height)", "def rows(self) -> int:\n return self.__squares.__len__()", "def dimensions(m):\n n = len(m)\n assert n > 0\n p = len(m[0])\n assert p > 0\n for r in m:\n assert len(r) == p\n return (n, p)", "def get_num_tiles(grid_bbox, dxy): \r\n xmin, xmax, ymin, ymax = grid_bbox\r\n return (int(np.abs(ymax-ymin)/dxy), int(np.abs(xmax-xmin)/dxy))", "def size(self) -> Tuple[int, int]:\n return self._width, self._height", "def size(self) -> Tuple[int, int]:\n return (self.width, self.height)", "def get_dimensions(image_path):\n with Image.open(image_path) as img:\n return img.size", "def dim(self):\n return self.m, self.n", "def get_combined_size(tiles):\n # TODO: Refactor calculating layout to avoid repetition.\n columns, rows = calc_columns_rows(len(tiles))\n tile_size = tiles[0].image.size\n return (tile_size[0] * columns, tile_size[1] * rows)", "def get_image_size(self, **kwargs):\n points = kwargs['points']\n max_val = points.max(0)\n min_val = points.min(0)\n height = np.ceil((max_val[0] - min_val[0]) * self.res_x).astype(int)\n width = np.ceil((max_val[1] - min_val[1]) * self.res_y).astype(int)\n\n return height, width", "def numPixels(self):\n\t\treturn self.size", "def numPixels(self):\n\t\treturn self.size", "def get_size_of_grid(self):\n row = 0\n column = 0\n if int(self.var1.get()) == 1:\n row, column = 6, 6\n\n if int(self.var2.get()) == 1:\n row, column = 7, 6\n\n if int(self.var3.get()) == 1:\n row, column = 7, 7\n\n if int(self.var4.get()) == 1:\n row, column = 8, 8\n\n return row, column", "def numRowsCols(array):\n return len(array),len(array[0])", "def array_dimensions(array):\n height = len(array)\n width = len(array[0])\n\n return width, height", "def GetNumberOfComponents():\n return _itkRGBAPixelPython.itkRGBAPixelF_GetNumberOfComponents()", "def canvas_size(self):\r\n width = height = 0\r\n for image in self.images:\r\n x = image.x + image.absolute_width\r\n y = image.y + image.absolute_height\r\n if width < x:\r\n width = x\r\n if height < y:\r\n height = y\r\n return round_up(width), round_up(height)", "def get_size(self):\n lines = len(self.coefficients)\n columns = 0 if lines == 0 else len(self.coefficients[0])\n return lines, columns", "def dimensions_of_box(box: ndarray) -> Tuple[float, float]:\n\n (top_left, _, bottom_right, _) = box\n\n (x1, y1) = top_left\n (x2, y2) = bottom_right\n\n return (x2 - x1, y2 - y1)", "def dim(self):\n return (self.n, )", "def get_dimensions(n=1):\n if n <= len(self._dimensions): return self._dimensions[n-1]", "def _get_parameter_count(self):\n parameters_d = 5;\n size_h = self.model.size_h\n return (size_h - 1) + size_h * (\n (size_h - 1) + parameters_d + (self.model.size_aa - 1) + \n (self.model.size_ss - 1) + (self.model.size_cis - 1)\n )", "def pixelSize(self):\n br = self.sceneBoundingRect()\n if self.image is None:\n return 1,1\n return br.width()/self.width(), br.height()/self.height()", "def get_num_fields(x_shape, 
field_height, field_width, padding, stride):\n N, C, H, W = x_shape\n if (W + 2 * padding - field_width) % stride != 0:\n raise ValueError('Invalid params for im2col; width does not work')\n if (H + 2 * padding - field_height) % stride != 0:\n raise ValueError('Invalid params for im2col; height does not work')\n\n # There are WW and HH receptive fields in the x and y direction respectively\n WW = (W + 2 * padding - field_width) / stride + 1\n HH = (H + 2 * padding - field_height) / stride + 1\n\n return HH, WW", "def _get_nparts(filename,headersize,itemsize):\n return (os.path.getsize(filename)-headersize)/itemsize", "def calculate_size(self, num_dots):\n self.objects = num_dots\n square = sqrt(self.objects)\n if self.objects % square == 0:\n return int(square), int(square)\n else:\n denom = self.objects // sqrt(self.objects)\n while self.objects % denom != 0:\n denom -= 1\n return int(denom), int(self.objects // denom)", "def get_image_shape(self) -> Tuple[int, int]:\n x = self.header.get(\"Rows\")\n y = self.header.get(\"Columns\")\n if x is not None and y is not None:\n return (x // self.size, y // self.size)", "def dim(self) -> tuple:\n if self.expr_list: return (self.expr_list[0].size()[0], len(self.expr_list)) + self.expr_list[0].size()[1:]\n elif self.expr_tensor is not None: return self.expr_tensor.size()\n elif self.expr_transposed_tensor is not None:\n return (self.expr_transposed_tensor.size()[1], self.expr_transposed_tensor.size()[0]) + self.expr_transposed_tensor.size()[2:]\n else:\n raise NotImplementedError()", "def dim(self) -> Tuple[Tuple[int, int], Tuple[int, int]]:", "def optimal_chunksizes(nt, nlat, nlon):\n\n clon = np.sqrt(1000000.0 * nlon / (nlat * nt))\n clat = nlat * clon / nlon\n return (nt, int(np.ceil(clat)), int(np.ceil(clon)))", "def num_divisors(n):\n\tif n < 2:\n\t\treturn 1 \t# not really correct\n\t\n\tdivisors = 1\n\ti = 2\n\n\twhile n > 1:\n\t\tp = 0 \t# p will be the maximum x such that i^x evenly divides n\n\n\t\t# repeatedly divide n by i, and store the number of times into p\n\t\twhile (n % i == 0):\n\t\t\tn = n / i\n\t\t\tp += 1\n\n\t\tdivisors = divisors * (p + 1)\n\t\ti += 1\n\n\treturn divisors", "def get_image_size(self, **kwargs):\n fov_height = np.abs(self.fov_pitch[1] - self.fov_pitch[0])\n fov_width = np.abs(self.fov_yaw[1] - self.fov_yaw[0])\n height = np.ceil(fov_height * self.res_pitch).astype(int)\n width = np.ceil(fov_width * self.res_yaw).astype(int)\n\n return height, width", "def get_grid_width(puzzle: str) -> int:\r\n return int(len(puzzle) ** (1 / 2))", "def n_dims(self):\n return len(self.dimensions)", "def count_partitions(n, m):\n # print(n, m)\n if n == 0:\n return 1\n elif n < 0:\n return 0\n elif m == 0:\n return 0\n else:\n return count_partitions(n-m, m) + count_partitions(n, m//2)", "def get_nb_element_per_dimension(recipe):\n return len(recipe[\"r\"]), len(recipe[\"c\"]), len(recipe[\"z\"])", "def dimensions():", "def get_dimensions(view: View, path: str):\n\n # Allow max automatic detection and remove gutter\n max_width, max_height = view.viewport_extent()\n max_width *= 0.75\n max_height *= 0.75\n max_ratio = max_height / max_width\n\n # Get image dimensions\n try:\n width, height, _ = get_image_size(path)\n except UnknownImageFormat:\n return -1, -1\n\n # First check height since it's the smallest vector\n if height / width >= max_ratio and height > max_height:\n ratio = max_height / height\n width *= ratio\n height *= ratio\n elif height / width <= max_ratio and width > max_width:\n ratio = max_width / width\n width 
*= ratio\n height *= ratio\n\n return width, height", "def GetNumberOfComponents():\n return _itkRGBAPixelPython.itkRGBAPixelUC_GetNumberOfComponents()" ]
[ "0.73097175", "0.7159921", "0.6962047", "0.69273585", "0.6925103", "0.6749133", "0.66736543", "0.6470178", "0.64632785", "0.63872576", "0.6368298", "0.62814194", "0.6258692", "0.6197939", "0.61734897", "0.6155329", "0.6155302", "0.61514986", "0.61438143", "0.6133328", "0.61312526", "0.6130664", "0.6118315", "0.61169475", "0.61026084", "0.6093603", "0.60616916", "0.60548323", "0.60413", "0.60203373", "0.6013206", "0.60109293", "0.6010862", "0.5978588", "0.5978588", "0.5956776", "0.5935351", "0.5934253", "0.5918015", "0.590795", "0.5896773", "0.5896245", "0.5885314", "0.58803916", "0.58797836", "0.58756053", "0.5869275", "0.5869275", "0.5869275", "0.5867929", "0.58657724", "0.58477646", "0.58345973", "0.58130634", "0.5809199", "0.5802594", "0.5797197", "0.5791285", "0.5790044", "0.5768813", "0.57675016", "0.5766121", "0.57638365", "0.5761568", "0.57497734", "0.5749161", "0.5738371", "0.5737829", "0.5728165", "0.5716764", "0.5708305", "0.57077116", "0.57077116", "0.5690657", "0.56899226", "0.56838113", "0.56787825", "0.5676447", "0.567292", "0.56702185", "0.5669263", "0.5661619", "0.56469065", "0.5644882", "0.5639677", "0.5634747", "0.5618437", "0.56106615", "0.56066537", "0.56029356", "0.5593407", "0.5591478", "0.5588447", "0.5587987", "0.55872023", "0.55856115", "0.55814004", "0.55786175", "0.5575142", "0.55734086" ]
0.79326427
0
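As a quick sanity check of the positive document in the row above, the following self-contained snippet runs `calc_columns_rows` for a few values of ``n``; the explicit `math` import is the assumption already noted in the snippet's comment.

```python
from math import ceil, sqrt

def calc_columns_rows(n):
    num_columns = int(ceil(sqrt(n)))
    num_rows = int(ceil(n / float(num_columns)))
    return (num_columns, num_rows)

assert calc_columns_rows(4) == (2, 2)   # perfect square: 2 x 2 grid
assert calc_columns_rows(5) == (3, 2)   # 5 parts fit in a 3 x 2 grid
assert calc_columns_rows(7) == (3, 3)   # ceil(sqrt(7)) = 3, ceil(7 / 3) = 3
```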
Calculate combined size of tiles.
def get_combined_size(tiles):
    # TODO: Refactor calculating layout to avoid repetition.
    columns, rows = calc_columns_rows(len(tiles))
    tile_size = tiles[0].image.size
    return (tile_size[0] * columns, tile_size[1] * rows)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tile_size_2d(self):\n return 32.0, 32.0", "def get_num_tiles(rows, cols, row_tile_size, col_tile_size):\n num_row_tiles = math.ceil(rows / row_tile_size)\n num_col_tiles = math.ceil(cols / col_tile_size)\n return num_row_tiles, num_col_tiles", "def get_tilesize(self, sampling):\n xsize = {\n 'T6': 600000,\n 'T3': 300000,\n 'T1': 100000\n }[self.get_tiletype(sampling)]\n ysize = {\n 'T6': 600000,\n 'T3': 300000,\n 'T1': 100000\n }[self.get_tiletype(sampling)]\n return xsize, ysize", "def getNumTiles(self):\n return len(list(product(list(range(self.width+1))[1:], list(range(self.height+1))[1:])))", "def num_tiles(self):\n return self.num_row_tiles * self.num_col_tiles", "def getNumTiles(self):\n return self.w * self.h", "def getNumTiles(self):\n return (self.width) * (self.height)", "def getNumTiles(self):\n #raise NotImplementedError #refer https://docs.python.org/2/library/exceptions.html\n return self.width * self.height", "def getNumTiles(self):\n return self.height * self.width", "def calc_size(self):\r\n pass", "def calculate_size(self, num_dots):\n self.objects = num_dots\n square = sqrt(self.objects)\n if self.objects % square == 0:\n return int(square), int(square)\n else:\n denom = self.objects // sqrt(self.objects)\n while self.objects % denom != 0:\n denom -= 1\n return int(denom), int(self.objects // denom)", "def output_image_size(n_patches_x, n_patches_y, patch_size):\n width = n_patches_x * patch_size\n height = n_patches_y * patch_size\n return width, height", "def get_size(self):\n tmpsize = 0\n for variable in self.variables:\n tmpsize += variable.get_size()\n for subchunk in self.subchunks:\n tmpsize += subchunk.get_size()\n self.size.value = tmpsize\n return self.size.value + self.ID.get_size() + self.size.get_size()", "def get_size(self):\n tmpsize = 0\n for variable in self.variables:\n tmpsize += variable.get_size()\n for subchunk in self.subchunks:\n tmpsize += subchunk.get_size()\n return tmpsize", "def calculate_min_max_tiles(self):", "def calc_size(cls) -> int:\n return calcsize('<' + cls.fmt)", "def _calculate_room_size(self):\n config = self.game.config\n\n short_side = min(config.map_height, config.map_width)\n\n largest_room_size = 0\n total_size = 0\n total_corridor_len = self.corridor_length * (self.grid_size - 1)\n for check_size in range(3, short_side, 2):\n all_rooms_len = check_size * self.grid_size\n rooms_and_corridors = all_rooms_len + total_corridor_len\n if rooms_and_corridors <= short_side:\n largest_room_size = check_size\n total_size = rooms_and_corridors\n else:\n break\n\n return largest_room_size, total_size", "def _get_dimensions(self):\n corners = []\n for module in self.modules:\n for tile in module:\n corners.append(tile.corner_idx)\n corners.append(tile.opp_corner_idx)\n corners = np.stack(corners)\n\n # Find extremes\n min_yx = corners.min(axis=0)\n max_yx = corners.max(axis=0)\n\n size = max_yx - min_yx\n centre = -min_yx\n return tuple(size), centre", "def calculate_size(self):\n top_left_y = 0\n top_left_x = 0\n\n bottom_right_y = 1\n bottom_right_x = 1\n\n # TODO: calculate the correct bounds of the threat zone.\n\n raise NotImplementedError\n\n # if there is a sight_range for this map_obstacle then increase the size of the zone.\n if self.sight_range > 0:\n top_left_y += self.sight_range\n top_left_x += self.sight_range\n bottom_right_y += self.sight_range\n bottom_right_x += self.sight_range\n\n top_left = (top_left_y, top_left_x)\n bottom_right = (bottom_right_y, bottom_right_x)\n\n height = bottom_right_y - top_left_y\n 
width = bottom_right_x - top_left_x\n\n self.top_left_y = top_left_y\n self.top_left_x = top_left_x\n self.bottom_right_y = bottom_right_y\n self.bottom_right_x = bottom_right_x\n self.height = height\n self.width = width\n\n return (top_left, bottom_right, height, width)", "def _get_dimensions(self):\n corners = []\n for module in self.modules:\n for tile in module:\n corners.append(tile.corners())\n corners = np.concatenate(corners)[:, :2] / self._pixel_shape\n\n # Find extremes, add 1 px margin to allow for rounding errors\n min_xy = corners.min(axis=0).astype(int) - 1\n max_xy = corners.max(axis=0).astype(int) + 1\n\n size = max_xy - min_xy\n centre = -min_xy\n # Switch xy -> yx\n return tuple(size[::-1]), centre[::-1]", "def get_tile_size(self, map_size = None, show_info = None):\n if not map_size: map_size = self.map_size\n w,h = self.img_size\n x_tiles,y_tiles = map_size\n\n tile_raw_w = w / x_tiles\n tile_raw_h = h / y_tiles\n\n if self.debug:\n print(f' ► Raw tile width: {tile_raw_w}\\n ► Raw tile height: {tile_raw_h}')\n\n tile_w = int(round(tile_raw_w))\n tile_h = int(round(tile_raw_h))\n\n if show_info:\n print(f' Image Size: {w} x {h} px\\n Tile Size: {tile_w} x {tile_h} px\\n Map Size: {x_tiles} x {y_tiles} tiles')\n\n error_w = tile_w - tile_raw_w\n error_h = tile_h - tile_raw_h\n print(f'\\n -=ERROR INFO=-\\n Tile Size Width Error: {round(error_w,4)} px \\n Tile Size Height Error: {round(error_h,4)} px \\n Total Width Rounding Error: {round(error_w * x_tiles,4)} px \\n Total Height Rounding Error: {round(error_h * y_tiles,4)} px\\n')\n\n return (tile_raw_w,tile_raw_h)", "def compute_combined_size(size_dict, modes):\n size = 1\n for mode in modes:\n size *= size_dict[mode]\n return size", "def size(self):\n return reduce(lambda x, ins: x + ins.size, self.instructions, 0)", "def get_tile_size(num_pixels, tile_size=400):\n\n # How many times can we repeat a tile of the desired size.\n num_tiles = int(round(num_pixels / tile_size))\n\n # Ensure that there is at least 1 tile.\n num_tiles = max(1, num_tiles)\n\n # The actual tile-size.\n actual_tile_size = math.ceil(num_pixels / num_tiles)\n\n return actual_tile_size", "def compute_outub_size(height, width, dtype, core_nums):\n ubuf_size = 100 * 1024 # ub whole size 100 * 1024 byte\n out_ele_perblock = compute_perblock_nums(dtype)\n out_blocks = math.ceil(height * width / out_ele_perblock)\n block_per_core = math.ceil(out_blocks / core_nums)\n use_cores = math.ceil(out_blocks / block_per_core)\n out_ele_size = cce.cce_intrin.get_bit_len(dtype) // BITS_NUMS\n out_f16_size = cce.cce_intrin.get_bit_len(\"float16\") // BITS_NUMS\n out_int8_size = cce.cce_intrin.get_bit_len(\"int8\") // BITS_NUMS\n if dtype in [\"int8\", \"uint8\"]:\n need_size = block_per_core * out_ele_perblock * (out_f16_size + out_int8_size)\n if need_size > ubuf_size:\n block_num = ubuf_size // (out_ele_perblock * (out_f16_size + out_int8_size))\n out_factor = math.ceil(block_per_core / block_num)\n last_remian = block_per_core % block_num\n else:\n block_num = block_per_core\n out_factor = 1\n last_remian = 0\n total_len = block_num * out_ele_perblock\n else:\n need_size = block_per_core * out_ele_size * out_ele_perblock\n if need_size > ubuf_size:\n block_num = ubuf_size // BYTES_PER_BLOCK\n out_factor = math.ceil(block_per_core / block_num)\n last_remian = block_per_core % block_num\n else:\n block_num = block_per_core\n out_factor = 1\n last_remian = 0\n total_len = block_num * out_ele_perblock\n\n return block_num, block_per_core, out_factor, last_remian, 
total_len, use_cores", "def get_size(self) -> int:\n total_size = 0\n for entry in self.__entries:\n total_size += entry.get_size()\n return total_size", "def compute_size(self):\n length = np.max(np.max(self.positions, axis=1) -\n np.min(self.positions, axis=1))\n return length + 2*self.get_radii().max()", "def compute_tile_size(total_size,\n tile_size_min=180,\n tile_size_max=512,\n tile_size_step=2,\n chunk_size=None,\n num_levels_min=None,\n int_div=False):\n\n ts = total_size\n num_levels = 0\n while ts % 2 == 0:\n ts2 = ts // 2\n if ts2 < tile_size_min:\n break\n ts = ts2\n num_levels += 1\n\n if ts <= tile_size_max and (not num_levels_min or num_levels >= num_levels_min):\n return ts\n\n min_penalty = 10 * total_size\n best_tile_size = None\n for ts in range(tile_size_min, tile_size_max + 1, tile_size_step):\n\n if int_div and total_size % ts:\n continue\n\n num_tiles = cardinal_div_round(total_size, ts)\n if num_levels_min:\n num_levels = cardinal_log2(num_tiles * ts)\n if num_levels < num_levels_min:\n continue\n\n total_size_excess = ts * num_tiles - total_size\n penalty = total_size_excess\n\n if chunk_size:\n num_chunks = cardinal_div_round(ts, chunk_size)\n tile_size_excess = ts * num_chunks - ts\n penalty += tile_size_excess\n\n if penalty < min_penalty:\n min_penalty = penalty\n best_tile_size = ts\n\n if not best_tile_size:\n # if no suitable tile size can be found, use the image untiled\n best_tile_size = total_size\n return best_tile_size", "def __len__(self) -> int:\n return len(self._tiles)", "def getSize(self):\n return (int(self.getWidth()), int(self.getHeight()))", "def get_size(self) -> Tuple2IntType:\n return self.get_width(), self.get_height()", "def cellsize_2d(self):\t\r\n return self.dx * self.dy", "def _cell_state_size(self):\n state_sizes = self._cells[0].state_size\n if isinstance(state_sizes, tuple):\n return sum(state_sizes)\n return state_sizes", "def size(self, batch):\n x,y,m = batch \n return sum([mm.sum() for mm in m])", "def __len__(self) -> int:\n return self.width * self.height", "def get_composite_sizes(lfds):\n sizes = tuple(sum(ii) for ii in zip(*[ii.sizes for ii in lfds]))\n drange = (lfds[0].drange[0], lfds[-1].drange[1])\n\n return sizes, drange", "def __len__(self):\n return self.width * self.height", "def _SizeCalculator(partition_size):\n # Minus footer size to return max image size.\n return partition_size - int(math.pow(partition_size, 0.95))", "def Size(self) -> \"unsigned long long\":\n return _itkQuadEdgeCellTraitsInfoPython.itkMapContainerULLQEMPF2GQEULLULLBBT_Size(self)", "def getSize(self):\n return self.__width * self.__height;", "def get_map_size(self, map_major_dim=None):\n w, h = self.img_size\n mmd = map_major_dim\n if w >= h:\n x_tiles = mmd\n y_tiles = round(h / w * mmd)\n else:\n x_tiles = round(w / h * mmd)\n y_tiles = mmd\n\n return (x_tiles, y_tiles)", "def _get_final_size(param_grid):\n tmp = {} # same pattern than param_grid but store the size\n for idx, key in enumerate(param_grid.iterkeys()):\n if isinstance(param_grid[key], list):\n tmp[idx] = [sys.getsizeof(value) for value in param_grid[key]]\n else:\n tmp[idx] = [sys.getsizeof(param_grid[key])]\n return np.array([x for x in itertools.product(*tmp.values())]).sum()", "def calculate_area(building, pixel_size=1):\n return len(building.points) * (pixel_size**2)", "def totalsize(self):\n return sum([sz for sz in self.iterate()])", "def size(self) -> int:\n\n return self.sizes.sum()", "def Size(self) -> \"unsigned long long\":\n return 
_itkQuadEdgeCellTraitsInfoPython.itkMapContainerULLQEMPF3GQEULLULLBBT_Size(self)", "def PT_SizeL2(self, entry_size, L2BlockEntries):\n # pt_size = self.PT_SizeL1(entry_size)\n # num_blocks = pt_size / self.pagesize #blocks in L2 PT = number of entries in L1 PT\n coverage = L2BlockEntries * self.pagesize #Physical memory coverage (bytes) per L1 Page Table entry\n needed_L1_PTE = self.vmem / coverage\n return needed_L1_PTE * entry_size", "def expected_width(self):\n\t\treturn self.expected_tile_width * TILE_SIZE", "def get_typical_size(workers: List[List[int]]) -> int:\n size = 0\n for worker in workers:\n size = max([size,\n np.abs(worker[2]-worker[0]),\n np.abs(worker[3]-worker[1])])\n \n return size", "def size(self):\n return reduce(mul, self.shape, 1)", "def get_size(self, hdf):\n return sum([sys.getsizeof(hdf[p]) for p in hdf.list_nodes()]) + sum(\n [self.get_size(hdf[p]) for p in hdf.list_groups()]\n )", "def tileWidth(self):\n return self._tileWidth", "def size(self) -> Tuple[int, int]:\n return (self.width, self.height)", "def max_cell_print_len(self):\n m = 0\n for r in range(1, self.height + 1):\n for c in range(1, self.width + 1):\n l = 0\n for item in self.list_things_at((r, c)):\n #print 'max_cell_print_len:', item\n l += len(item.to_string())\n if l > m:\n m = l\n return m", "def get_block_size( coords ):\n return [ x[1]-x[0] for x in coords ]", "def _SizeCalculator(partition_size):\n # Max image size grows less than partition size, which means\n # footer size grows faster than partition size.\n return int(math.pow(partition_size, 0.95))", "def get_num_tiles(grid_bbox, dxy): \r\n xmin, xmax, ymin, ymax = grid_bbox\r\n return (int(np.abs(ymax-ymin)/dxy), int(np.abs(xmax-xmin)/dxy))", "def dimensions():", "def getSize(self) -> long:\n ...", "def compute_size(requested_width, requested_height, rev_width, time_height):\n pic_width = 0\n pic_height = 0\n if (requested_width is not None and requested_height is not None):\n pic_height = requested_height\n pic_width = requested_width\n \n elif (requested_width is not None):\n pic_width = requested_width\n pic_height = pic_width * (float(time_height) / rev_width)\n \n elif (requested_height is not None):\n pic_height = requested_height\n pic_width = pic_height * (float(rev_width) / time_height)\n \n else:\n pic_height = 800\n pic_width = max(rev_width*3\n , pic_height * (float(rev_width) / time_height))\n \n return (pic_width, pic_height)", "def optimal_chunksizes(nt, nlat, nlon):\n\n clon = np.sqrt(1000000.0 * nlon / (nlat * nt))\n clat = nlat * clon / nlon\n return (nt, int(np.ceil(clat)), int(np.ceil(clon)))", "def getSize(self):\n if self.subsym == None:\n if self.size == 0:\n return 1\n else:\n return self.size\n else:\n if self.size == 0:\n return self.subsym.getSize()\n else:\n return self.size * self.subsym.getSize()", "def numTilePossibilities(self, tiles: str) -> int:\n def generate(tiles: str, length: int) -> int:\n count = 0\n for i, symbol in enumerate(tiles):\n if i and tiles[i - 1] == symbol:\n continue\n if length > 1:\n count += generate(tiles[:i] + tiles[i+1:], length - 1)\n else:\n count += 1\n return count\n \n count = 0\n tiles = ''.join(sorted(tiles))\n for i in range(len(tiles)):\n count += generate(tiles, i + 1)\n return count", "def size(self):\n size = 1\n for current_slice in self.slices:\n size *= current_slice.stop - current_slice.start\n return size", "def __len__(self):\n a = 1\n for size in self.sizes:\n a *= size\n return a", "def getPixelSize(self):\n return (0.000013, 0.000013)", "def 
calculatearea(self):\r\n return self.width * self.height", "def getSize(self) -> int:\n ...", "def getSize(self) -> int:\n ...", "def size(self) -> Tuple[int, int]:\n return self._width, self._height", "def add_tile(self, tile):\r\n payload_size = self.payload.add_content(tile.as_bits())\r\n self.size = self.header.size + payload_size + self.padding.size\r\n return self.size", "def size(self):\n\n # Replace by correct code\n if self.right and self.left:\n return self.left.height() + self.right.height()\n elif not self.right:\n return self.left.size()\n elif not self.left:\n return self.right.size()\n else:\n return self._size", "def calculate_dimensions(self):\n x_coordinates = np.sort(self.grid['x'][:, 0]) # first x node\n self.nr_nodes_z = np.where(x_coordinates == x_coordinates[0])[0].size\n self.nr_elements_x = self.elements.shape[0] / (self.nr_nodes_z - 1)\n self.nr_nodes_x = self.nr_elements_x + 1\n self.nr_elements_z = self.nr_nodes_z - 1", "def layers_sizes(self):\n return iter([self.delta_h*l for l in range(int(self.h/self.delta_h)-1)])", "def area(self):\n return int(self.__size) * int(self.__size)", "def calc_image_size(spr):\n return int(max(spr.label_safe_width(), 1)), \\\n int(max(spr.label_safe_height(), 1))", "def get_image_size(self):", "def part1():\n program = read_input()\n root = build_filesystem(program)\n all_sizes = root.make_size_list()\n return sum(size for size in all_sizes if size <= 100000)", "def mapSize(self):\n return len(self._cells)", "def __getSideLength(self, width, height):\n\n # Get screen size from config file.\n with open(\"config.txt\") as f:\n config = json.loads(f.read())\n\n tileWidth = config[\"screenWidth\"]\n tileHeight = config[\"screenHeight\"]\n\n # Get max tile height and width.\n tileHeight = math.floor(tileHeight / (height+2))\n tileWidth = math.floor(tileWidth / (width+2))\n\n # Get the smallest of the two so the tile can be square.\n if tileHeight > tileWidth:\n sideLength = tileWidth\n else:\n sideLength = tileHeight\n\n return sideLength", "def get_map_size(level):\n if level < 5:\n return 5, 5\n if level < 70:\n return 10, 10\n if level < 150:\n return 25, 25\n return 50, 50", "def calculatesize(self, size):\n wsize = self.layout.size\n x = (wsize[0] * size[0]) / 100\n y = (wsize[1] * size[1]) / 100\n return x, y", "def _step_size(self, renderer):\n return (self.symbol_width + self.spacing + self._padding) * self._size_pixels(renderer)", "def _step_size(self, renderer):\n return (self.symbol_width + self.spacing + self._padding) * self._size_pixels(renderer)", "def getBoxsize(stepCount,stepHeight,stepWidth,platformWidth,stairsLength,distance):\n #///重新给box的三个属性赋值\n box_width = (stepCount-1)*stepWidth + platformWidth\n box_length = (stairsLength*2+distance) \n #distance = box_length-stairsLength*2\n box_height = stepCount*2*stepHeight\n #print (\"box_length:%s,box_width:%s,box_height:%s\"%(box_length,box_width,box_height))\n return box_length,box_width,box_height", "def __len__(self) -> int:\n return sum(target.quantity for target in self.target_sizes)", "def canvas_size(self):\r\n width = height = 0\r\n for image in self.images:\r\n x = image.x + image.absolute_width\r\n y = image.y + image.absolute_height\r\n if width < x:\r\n width = x\r\n if height < y:\r\n height = y\r\n return round_up(width), round_up(height)", "def get_total_tiles_building_area(tile_ind_list, session):\n\n total_area_ml, total_area_osm = 0, 0\n for row in session.query(TilePredBA).filter(\n TilePredBA.tile_index.in_(tile_ind_list)):\n total_area_ml += 
row.building_area_ml\n total_area_osm += row.building_area_osm\n\n return total_area_ml, total_area_osm", "def size(cls):\n return (cls.num_properties()*2 + 2)", "def get_state_size(self) -> Tuple[int, int]:\n return self.height, self.width", "def __len__(self):\n return len(self.__squares) * len(self.__squares[0])", "def size(self):\n return (self.width(), self.height())", "def size(self) -> typing.Tuple[int, int]:\n return self.width, self.height", "def get_need_size_for_each_dimension(used_coordinates: list, num_dimensions) -> list:\n max_coordinates = [0] * num_dimensions\n for coordinate in used_coordinates:\n for dimension in range(num_dimensions):\n max_coordinates[dimension] = max(max_coordinates[dimension], coordinate[dimension] + 1)\n return max_coordinates", "def getNumTiles(self):\n\t\treturn self.numTiles", "def get_size(self):\n return get_dir_size(self.run_dir)", "def tileHeight(self):\n return self._tileHeight", "def __sizeof__(self):\r\n\r\n S = 0 # Full size of the object\r\n if self.loss_list is not None:\r\n for value in self.loss_list:\r\n S += getsizeof(value)\r\n if self.meshsol_list is not None:\r\n for value in self.meshsol_list:\r\n S += getsizeof(value)\r\n if self.loss_index is not None:\r\n for key, value in self.loss_index.items():\r\n S += getsizeof(value) + getsizeof(key)\r\n S += getsizeof(self.logger_name)\r\n if self.axes_dict is not None:\r\n for key, value in self.axes_dict.items():\r\n S += getsizeof(value) + getsizeof(key)\r\n S += getsizeof(self.Pstator)\r\n S += getsizeof(self.Protor)\r\n S += getsizeof(self.Pmagnet)\r\n S += getsizeof(self.Pprox)\r\n S += getsizeof(self.Pjoule)\r\n if self.coeff_dict is not None:\r\n for key, value in self.coeff_dict.items():\r\n S += getsizeof(value) + getsizeof(key)\r\n return S", "def _adjacent_blob_size(self, pos, board, visited) -> int:\n col, row = pos[0], pos[1]\n total = 0\n total += self._undiscovered_blob_size((col - 1, row), board, visited)\n total += self._undiscovered_blob_size((col, row - 1), board, visited)\n total += self._undiscovered_blob_size((col + 1, row), board, visited)\n total += self._undiscovered_blob_size((col, row + 1), board, visited)\n return total", "def size(self):\n return (self.width, self.height)" ]
[ "0.6917832", "0.6854386", "0.66924536", "0.66773844", "0.6626963", "0.6563682", "0.6505493", "0.6481431", "0.64496267", "0.641139", "0.63941205", "0.631634", "0.6312036", "0.627973", "0.62632513", "0.625943", "0.6246556", "0.621888", "0.6217926", "0.62107056", "0.61723995", "0.6150797", "0.6135983", "0.61272764", "0.6107911", "0.6065271", "0.60528404", "0.6042425", "0.6030999", "0.6018477", "0.60179275", "0.60142714", "0.5957989", "0.59441847", "0.5941643", "0.5928922", "0.5917522", "0.59149116", "0.5913863", "0.59028697", "0.5897652", "0.5895181", "0.58702695", "0.58478653", "0.5833449", "0.58238083", "0.5823663", "0.58076733", "0.5799486", "0.5798194", "0.5797162", "0.57770085", "0.5769709", "0.5767493", "0.5767007", "0.57564837", "0.5750821", "0.57489866", "0.57456625", "0.57313836", "0.57280135", "0.5717281", "0.5714835", "0.5707625", "0.57053804", "0.56996334", "0.5693389", "0.56869256", "0.56869256", "0.5682648", "0.5679262", "0.56627923", "0.5661642", "0.5659131", "0.56563765", "0.5653105", "0.5650472", "0.5647439", "0.5641402", "0.5637636", "0.5636833", "0.56334096", "0.5630725", "0.5630725", "0.5625047", "0.5617156", "0.56167424", "0.5613037", "0.56062996", "0.56034094", "0.56032264", "0.5600721", "0.5598995", "0.5598606", "0.55963296", "0.5594263", "0.5585276", "0.5583059", "0.55720085", "0.5571841" ]
0.8319171
0
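A minimal, self-contained check of the `get_combined_size` document above. The `Tile` namedtuple is a stand-in assumption for the library's tile objects (only the `.image.size` attribute is exercised), and the 100 x 80 tile dimensions are arbitrary.

```python
from math import ceil, sqrt
from collections import namedtuple

from PIL import Image

def calc_columns_rows(n):
    num_columns = int(ceil(sqrt(n)))
    num_rows = int(ceil(n / float(num_columns)))
    return (num_columns, num_rows)

def get_combined_size(tiles):
    columns, rows = calc_columns_rows(len(tiles))
    tile_size = tiles[0].image.size
    return (tile_size[0] * columns, tile_size[1] * rows)

# Stand-in tile: only `.image` is needed by get_combined_size.
Tile = namedtuple("Tile", ["image"])
tiles = [Tile(Image.new("RGB", (100, 80))) for _ in range(6)]

# 6 tiles -> 3 columns x 2 rows of 100 x 80 tiles.
assert get_combined_size(tiles) == (300, 160)
```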
``tiles``
    Tuple of ``Image`` instances.
``width``
    Optional, width of combined image.
``height``
    Optional, height of combined image.
Returns:
    ``Image`` instance.
def join(tiles, width=0, height=0):
    # `Image` here is PIL's Image module: from PIL import Image
    # Don't calculate size if width and height are provided;
    # this allows an application that knows what the combined size
    # should be to construct an image when pieces are missing.
    if width > 0 and height > 0:
        im = Image.new("RGBA", (width, height), None)
    else:
        im = Image.new("RGBA", get_combined_size(tiles), None)
    columns, rows = calc_columns_rows(len(tiles))
    for tile in tiles:
        try:
            im.paste(tile.image, tile.coords)
        except IOError:
            # do nothing, blank out the image
            continue
    return im
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _tile_images(imgs, tile_shape, concatenated_image, margin_color=None):\n x_num, y_num = tile_shape\n one_width = imgs[0].shape[1]\n one_height = imgs[0].shape[0]\n if concatenated_image is None:\n concatenated_image = np.zeros((one_height * y_num, one_width * x_num, 3),\n dtype=np.uint8)\n if margin_color is not None:\n concatenated_image[:, :] = margin_color\n for y in range(y_num):\n for x in range(x_num):\n i = x + y * x_num\n if i >= len(imgs):\n pass\n else:\n concatenated_image[y*one_height:(y+1)*one_height,x*one_width:(x+1)*one_width,] = imgs[i]\n return concatenated_image", "def combine_images(images: list) -> Image:\n img_width = images[0][0].width\n img_height = images[0][0].height\n new_size = (img_width * len(images[0]), img_height * len(images))\n new_image = Image.new('RGB', new_size)\n\n # Add all the images from the grid to the new, blank image\n for rowindex, row in enumerate(images):\n for colindex, image in enumerate(row):\n location = (colindex * img_width, rowindex * img_height)\n new_image.paste(image, location)\n\n return new_image", "def tile_images(image_stack):\n assert len(image_stack.shape) == 4\n image_list = [image_stack[i, :, :, :] for i in range(image_stack.shape[0])]\n tiled_images = np.concatenate(image_list, axis=1)\n return tiled_images", "def tiles(self, width: int, height: int) -> TileSet:\n y_count = len(self.tiling)\n for y_index, y_tile in enumerate(self.tiling):\n\n x_count = len(y_tile)\n for x_index, tile_strength in enumerate(y_tile):\n\n # Doing multiplication before devision here to make sure rounding is correct\n bounding_box = (\n # from (x1, y1)\n int(width * x_index / x_count),\n int(height * y_index / y_count),\n # to (x2, y2)\n int(width * (x_index + 1) / x_count),\n int(height * (y_index + 1) / y_count),\n )\n\n yield bounding_box, tile_strength", "def combine_pictures(images):\n widths, heights = zip(*(i.size for i in images))\n\n total_width = sum(widths)\n max_height = max(heights)\n\n new_im = Image.new('RGB', (total_width, max_height))\n\n x_offset = 0\n for im in images:\n new_im.paste(im, (x_offset, 0))\n x_offset += im.size[0]\n\n new_im.save('test.jpg')\n\n return True", "def tile_image(\n im: Image.Image, width: int, height: int, mode: Optional[str] = \"RGB\", **kwargs: Any\n) -> Image.Image:\n im_out = Image.new(mode, (width, height), **kwargs)\n\n h_tiles = ceil(width / im.width)\n v_tiles = ceil(height / im.height)\n\n for i in range(v_tiles):\n y = im.height * i\n for j in range(h_tiles):\n x = im.width * j\n im_out.paste(im, box=(x, y))\n\n return im_out", "def get_tile_image(imgs, tile_shape=None, result_img=None, margin_color=None):\n def get_tile_shape(img_num):\n x_num = 0\n y_num = int(math.sqrt(img_num))\n while x_num * y_num < img_num:\n x_num += 1\n return x_num, y_num\n\n if tile_shape is None:\n tile_shape = get_tile_shape(len(imgs))\n\n # get max tile size to which each image should be resized\n max_height, max_width = np.inf, np.inf\n for img in imgs:\n max_height = min([max_height, img.shape[0]])\n max_width = min([max_width, img.shape[1]])\n\n # resize and concatenate images\n for i, img in enumerate(imgs):\n h, w = img.shape[:2]\n h_scale, w_scale = max_height / h, max_width / w\n scale = min([h_scale, w_scale])\n h, w = int(scale * h), int(scale * w)\n img = cv2.resize(img, (w, h))\n img = centerize(img, (max_height, max_width, 3),\n margin_color=margin_color)\n imgs[i] = img\n return _tile_images(imgs, tile_shape, result_img,\n margin_color=margin_color)", "def get_combined_size(tiles):\n # TODO: 
Refactor calculating layout to avoid repetition.\n columns, rows = calc_columns_rows(len(tiles))\n tile_size = tiles[0].image.size\n return (tile_size[0] * columns, tile_size[1] * rows)", "def image_tiles(bqsession, image_service_url, tile_size=64):\n dims = bqsession.fetchxml(image_service_url, dims='')\n x = int(dims.xpath('//tag[@name=\"image_num_x\"]')[0].attrib[ 'value'])\n y = int(dims.xpath('//tag[@name=\"image_num_y\"]')[0].attrib[ 'value'])\n \n for ix in range(int(x/tile_size)-1):\n for iy in range(int(y/tile_size)-1):\n yield bqsession.c.prepare_url(image_service_url, tile='0,%s,%s,%s' % (str(ix), str(iy), str(tile_size)))", "def pack_image_nest(cls, imgs):\n assert rpack is not None, \"You need to install rectangle-packer first!\"\n\n imgs = nest.flatten(imgs)\n if len(imgs) == 0:\n return\n\n # first get all images' sizes (w,h)\n sizes = [(i.shape[1], i.shape[0]) for i in imgs]\n # call rpack for an approximate solution: [(x,y),...] positions\n positions = rpack.pack(sizes)\n # compute the height and width of the enclosing rectangle\n H, W = 0, 0\n for size, pos in zip(sizes, positions):\n H = max(H, pos[1] + size[1])\n W = max(W, pos[0] + size[0])\n\n packed_img = np.full((H, W, 3), 255, dtype=np.uint8)\n for pos, img in zip(positions, imgs):\n packed_img[pos[1]:pos[1] + img.shape[0], pos[0]:pos[0] +\n img.shape[1], :] = img.data\n return cls(packed_img)", "def tile_images(img, img_size=32, rows=4, cols=4, spacing=1):\n images = np.ones([3, rows * (img_size + spacing) - spacing, cols * (img_size + spacing)], dtype=np.float32)\n coords = [(i, j) for i in range(rows) for j in range(cols)]\n\n for (i, j), image in zip(coords, img):\n x = i * (img_size + spacing)\n y = j * (img_size + spacing)\n images[:, x: x+img_size, y:y+img_size] = image\n\n return images", "def _split_image_into_tiles(\n self, image: np.ndarray\n ) -> t.Sequence[t.Tuple[t.Tuple[t.Any, ...], np.ndarray]]:\n h, w, c = image.shape\n tile_height = (\n math.ceil(h / (self._n_tiles // 2 - 1))\n if self._n_tiles > 4\n else math.ceil(h / (self._n_tiles // 2))\n )\n tile_width = math.ceil(w / (self._n_tiles // 2))\n tiles = [] # type: ignore\n for i in range(0, h, tile_height):\n for j in range(0, w, tile_width):\n tiles.append(\n (\n (i, i + tile_height, j, j + tile_width),\n image[i : i + tile_height, j : j + tile_width, :],\n )\n )\n return tiles", "def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),\r\n scale_rows_to_unit_interval=True,\r\n output_pixel_vals=True):\r\n\r\n assert len(img_shape) == 2\r\n assert len(tile_shape) == 2\r\n assert len(tile_spacing) == 2\r\n\r\n # The expression below can be re-written in a more C style as\r\n # follows :\r\n #\r\n # out_shape = [0,0]\r\n # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -\r\n # tile_spacing[0]\r\n # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -\r\n # tile_spacing[1]\r\n out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp\r\n in zip(img_shape, tile_shape, tile_spacing)]\r\n\r\n if isinstance(X, tuple):\r\n assert len(X) == 4\r\n # Create an output numpy ndarray to store the image\r\n if output_pixel_vals:\r\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\r\n dtype='uint8')\r\n else:\r\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\r\n dtype=X.dtype)\r\n\r\n #colors default to 0, alpha defaults to 1 (opaque)\r\n if output_pixel_vals:\r\n channel_defaults = [0, 0, 0, 255]\r\n else:\r\n channel_defaults = [0., 0., 0., 1.]\r\n\r\n for i in xrange(4):\r\n if X[i] is None:\r\n # if 
channel is None, fill it with zeros of the correct\r\n # dtype\r\n dt = out_array.dtype\r\n if output_pixel_vals:\r\n dt = 'uint8'\r\n out_array[:, :, i] = numpy.zeros(out_shape,\r\n dtype=dt) + channel_defaults[i]\r\n else:\r\n # use a recurrent call to compute the channel and store it\r\n # in the output\r\n out_array[:, :, i] = tile_raster_images(\r\n X[i], img_shape, tile_shape, tile_spacing,\r\n scale_rows_to_unit_interval, output_pixel_vals)\r\n return out_array\r\n\r\n else:\r\n # if we are dealing with only one channel\r\n H, W = img_shape\r\n Hs, Ws = tile_spacing\r\n\r\n # generate a matrix to store the output\r\n dt = X.dtype\r\n if output_pixel_vals:\r\n dt = 'uint8'\r\n out_array = numpy.zeros(out_shape, dtype=dt)\r\n\r\n for tile_row in xrange(tile_shape[0]):\r\n for tile_col in xrange(tile_shape[1]):\r\n if tile_row * tile_shape[1] + tile_col < X.shape[0]:\r\n this_x = X[tile_row * tile_shape[1] + tile_col]\r\n if scale_rows_to_unit_interval:\r\n # if we should scale values to be between 0 and 1\r\n # do this by calling the `scale_to_unit_interval`\r\n # function\r\n this_img = scale_to_unit_interval(\r\n this_x.reshape(img_shape))\r\n else:\r\n this_img = this_x.reshape(img_shape)\r\n # add the slice to the corresponding position in the\r\n # output array\r\n c = 1\r\n if output_pixel_vals:\r\n c = 255\r\n out_array[\r\n tile_row * (H + Hs): tile_row * (H + Hs) + H,\r\n tile_col * (W + Ws): tile_col * (W + Ws) + W\r\n ] = this_img * c\r\n return out_array", "def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),\n scale_rows_to_unit_interval=True,\n output_pixel_vals=True):\n\n assert len(img_shape) == 2\n assert len(tile_shape) == 2\n assert len(tile_spacing) == 2\n\n # The expression below can be re-written in a more C style as\n # follows :\n #\n # out_shape = [0,0]\n # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -\n # tile_spacing[0]\n # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -\n # tile_spacing[1]\n out_shape = [\n (ishp + tsp) * tshp - tsp\n for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing)\n ]\n\n if isinstance(X, tuple):\n assert len(X) == 4\n # Create an output numpy ndarray to store the image\n if output_pixel_vals:\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\n dtype='uint8')\n else:\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\n dtype=X.dtype)\n\n #colors default to 0, alpha defaults to 1 (opaque)\n if output_pixel_vals:\n channel_defaults = [0, 0, 0, 255]\n else:\n channel_defaults = [0., 0., 0., 1.]\n\n for i in xrange(4):\n if X[i] is None:\n # if channel is None, fill it with zeros of the correct\n # dtype\n dt = out_array.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array[:, :, i] = numpy.zeros(\n out_shape,\n dtype=dt\n ) + channel_defaults[i]\n else:\n # use a recurrent call to compute the channel and store it\n # in the output\n out_array[:, :, i] = tile_raster_images(\n X[i], img_shape, tile_shape, tile_spacing,\n scale_rows_to_unit_interval, output_pixel_vals)\n return out_array\n\n else:\n # if we are dealing with only one channel\n H, W = img_shape\n Hs, Ws = tile_spacing\n\n # generate a matrix to store the output\n dt = X.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array = numpy.zeros(out_shape, dtype=dt)\n\n for tile_row in xrange(tile_shape[0]):\n for tile_col in xrange(tile_shape[1]):\n if tile_row * tile_shape[1] + tile_col < X.shape[0]:\n this_x = X[tile_row * tile_shape[1] + tile_col]\n if scale_rows_to_unit_interval:\n # if we should 
scale values to be between 0 and 1\n # do this by calling the `scale_to_unit_interval`\n # function\n this_img = scale_to_unit_interval(\n this_x.reshape(img_shape))\n else:\n this_img = this_x.reshape(img_shape)\n # add the slice to the corresponding position in the\n # output array\n c = 1\n if output_pixel_vals:\n c = 255\n out_array[\n tile_row * (H + Hs): tile_row * (H + Hs) + H,\n tile_col * (W + Ws): tile_col * (W + Ws) + W\n ] = this_img * c\n return out_array", "def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),\n scale_rows_to_unit_interval=True,\n output_pixel_vals=True):\n \n assert len(img_shape) == 2\n assert len(tile_shape) == 2\n assert len(tile_spacing) == 2\n \n # The expression below can be re-written in a more C style as\n # follows :\n #\n # out_shape = [0,0]\n # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -\n # tile_spacing[0]\n # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -\n # tile_spacing[1]\n out_shape = [\n (ishp + tsp) * tshp - tsp\n for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing)\n ]\n \n if isinstance(X, tuple):\n assert len(X) == 4\n # Create an output numpy ndarray to store the image\n # colors default to 0 (i.e. black), alphas defaults to 1 (fully opaque i.e.\n # corresponding pixel fully visible in image))\n if output_pixel_vals:\n out_array = np.zeros((out_shape[0], out_shape[1], 4),\n dtype='uint8') \n else:\n out_array = np.zeros((out_shape[0], out_shape[1], 4),\n dtype=X.dtype) \n\n if output_pixel_vals:\n channel_defaults = [0, 0, 0, 255]\n else:\n channel_defaults = [0., 0., 0., 1.]\n \n for i in range(4):\n if X[i] is None:\n # if channel is None, fill it with zeros of the correct\n # dtype\n dt = out_array.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array[:, :, i] = np.zeros(\n out_shape,\n dtype=dt\n ) + channel_defaults[i]\n else:\n # use a recurrent call to compute the channel and store it\n # in the output\n out_array[:, :, i] = tile_raster_images(\n X[i], img_shape, tile_shape, tile_spacing,\n scale_rows_to_unit_interval, output_pixel_vals)\n return out_array\n \n else:\n # if we are dealing with only one channel\n H, W = img_shape\n Hs, Ws = tile_spacing\n \n # generate a matrix to store the output\n dt = X.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array = np.ones(out_shape, dtype=dt)*255\n \n for tile_row in range(tile_shape[0]):\n for tile_col in range(tile_shape[1]):\n if tile_row * tile_shape[1] + tile_col < X.shape[0]:\n this_x = X[tile_row * tile_shape[1] + tile_col]\n if scale_rows_to_unit_interval:\n # if we should scale values to be between 0 and 1\n # do this by calling the `scale_to_unit_interval`\n # function\n this_img = scale_to_unit_interval(\n this_x.reshape(img_shape))\n else:\n this_img = this_x.reshape(img_shape)\n # add the slice to the corresponding position in the\n # output array\n c = 1\n if output_pixel_vals:\n c = 255\n out_array[\n tile_row * (H + Hs): tile_row * (H + Hs) + H,\n tile_col * (W + Ws): tile_col * (W + Ws) + W\n ] = this_img * c\n return out_array", "def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),\n scale_rows_to_unit_interval=True,\n output_pixel_vals=True):\n\n assert len(img_shape) == 2\n assert len(tile_shape) == 2\n assert len(tile_spacing) == 2\n\n # The expression below can be re-written in a more C style as\n # follows :\n #\n # out_shape = [0,0]\n # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -\n # tile_spacing[0]\n # out_shape[1] = 
(img_shape[1]+tile_spacing[1])*tile_shape[1] -\n # tile_spacing[1]\n out_shape = [\n (ishp + tsp) * tshp - tsp\n for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing)\n ]\n\n if isinstance(X, tuple):\n assert len(X) == 4\n # Create an output np ndarray to store the image\n if output_pixel_vals:\n out_array = np.zeros((out_shape[0], out_shape[1], 4),\n dtype='uint8')\n else:\n out_array = np.zeros((out_shape[0], out_shape[1], 4),\n dtype=X.dtype)\n\n # colors default to 0, alpha defaults to 1 (opaque)\n if output_pixel_vals:\n channel_defaults = [0, 0, 0, 255]\n else:\n channel_defaults = [0., 0., 0., 1.]\n\n for i in xrange(4):\n if X[i] is None:\n # if channel is None, fill it with zeros of the correct\n # dtype\n dt = out_array.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array[:, :, i] = np.zeros(\n out_shape,\n dtype=dt\n ) + channel_defaults[i]\n else:\n # use a recurrent call to compute the channel and store it\n # in the output\n out_array[:, :, i] = tile_raster_images(\n X[i], img_shape, tile_shape, tile_spacing,\n scale_rows_to_unit_interval, output_pixel_vals)\n return out_array\n\n else:\n # if we are dealing with only one channel\n H, W = img_shape\n Hs, Ws = tile_spacing\n\n # generate a matrix to store the output\n dt = X.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array = np.zeros(out_shape, dtype=dt)\n\n for tile_row in xrange(tile_shape[0]):\n for tile_col in xrange(tile_shape[1]):\n if tile_row * tile_shape[1] + tile_col < X.shape[0]:\n this_x = X[tile_row * tile_shape[1] + tile_col]\n if scale_rows_to_unit_interval:\n # if we should scale values to be between 0 and 1\n # do this by calling the `scale_to_unit_interval`\n # functionmapping\n this_img = scale_to_unit_interval(\n this_x.reshape(img_shape))\n else:\n this_img = this_x.reshape(img_shape)\n # add the slice to the corresponding position in the\n # output array\n c = 1\n if output_pixel_vals:\n c = 255\n out_array[\n tile_row * (H + Hs): tile_row * (H + Hs) + H,\n tile_col * (W + Ws): tile_col * (W + Ws) + W\n ] = this_img * c\n return out_array", "def stitch_map(tiles, width, height, bbox, dpi):\n size = (int(width * dpi_to_dpmm(dpi)), int(height * dpi_to_dpmm(dpi)))\n background = Image.new('RGBA', size, (255, 255, 255))\n for layer in tiles:\n layer_img = Image.new(\"RGBA\", size)\n for (x, y), tile_path in layer.items():\n tile = Image.open(tile_path)\n layer_img.paste(tile, ((x - bbox.min.x) * TILE_SIZE, (y - bbox.min.y) * TILE_SIZE))\n background = Image.alpha_composite(background, layer_img)\n add_scales_bar(background, bbox)\n return background.convert(\"RGB\")", "def get_tiles(self) -> list:\n n_rows = self.mosaic_dimensions[0]\n n_columns = self.mosaic_dimensions[1]\n return [\n self.get_tile(i_row, i_column)\n for i_row in range(n_rows)\n for i_column in range(n_columns)\n ]", "def slice(\n filename,\n number_tiles=None,\n col=None,\n row=None,\n save=True,\n DecompressionBombWarning=True,\n):\n if DecompressionBombWarning is False:\n Image.MAX_IMAGE_PIXELS = None\n\n im = Image.open(filename)\n im_w, im_h = im.size\n\n columns = 0\n rows = 0\n if number_tiles:\n validate_image(im, number_tiles)\n columns, rows = calc_columns_rows(number_tiles)\n else:\n validate_image_col_row(im, col, row)\n columns = col\n rows = row\n\n tile_w, tile_h = int(floor(im_w / columns)), int(floor(im_h / rows))\n\n tiles = []\n number = 1\n for pos_y in range(0, im_h - rows, tile_h): # -rows for rounding error.\n for pos_x in range(0, im_w - columns, tile_w): # as above.\n area = (pos_x, pos_y, pos_x 
+ tile_w, pos_y + tile_h)\n image = im.crop(area)\n position = (int(floor(pos_x / tile_w)) + 1, int(floor(pos_y / tile_h)) + 1)\n coords = (pos_x, pos_y)\n tile = Tile(image, number, position, coords)\n tiles.append(tile)\n number += 1\n if save:\n save_tiles(\n tiles, prefix=get_basename(filename), directory=os.path.dirname(filename)\n )\n return tuple(tiles)", "def readTiles(self):\n TileImage = Image.open(self.Filename).convert(\"RGB\")\n TileIW, TileIH = TileImage.size\n TilesetW, TilesetH = TileIW // self.TileWidth, TileIH // self.TileHeight\n\n for y in range(TilesetH):\n for x in range(TilesetW):\n box = self.TileWidth * x, self.TileHeight * y, self.TileWidth * (x+1), self.TileHeight * (y+1)\n tile = TileImage.crop(box)\n self.List.append(tile)\n\n str = tile.tostring()\n if not str in self.TileDict:\n #print(\"add tile: \", str)\n self.TileDict[str] = len(self.List) - 1\n print(\"tile count: {}, unique count: {}\".format(len(self.List),len(self.TileDict.values())))", "def split_image_into_tiles_of_size(arr: Image, tile_w: int, tile_h: int, overlap: int):\n x_axis = -1\n y_axis = -2\n arr_width, arr_height = arr.shape[x_axis], arr.shape[y_axis]\n\n x_ntiles = (\n arr_width // tile_w if arr_width % tile_w == 0 else (arr_width // tile_w) + 1\n )\n y_ntiles = (\n arr_height // tile_h if arr_height % tile_h == 0 else (arr_height // tile_h) + 1\n )\n\n tiles = []\n\n # row\n for i in range(0, y_ntiles):\n # height of this tile\n ver_f = tile_h * i\n ver_t = ver_f + tile_h\n\n # col\n for j in range(0, x_ntiles):\n # width of this tile\n hor_f = tile_w * j\n hor_t = hor_f + tile_w\n\n tile = get_tile(arr, hor_f, hor_t, ver_f, ver_t, overlap)\n\n tiles.append(tile)\n tile_shape = [tile_h, tile_w]\n ntiles = dict(x=x_ntiles, y=y_ntiles)\n padding = dict(left=0, right=0, top=0, bottom=0)\n if arr_width % tile_w == 0:\n padding[\"right\"] = 0\n else:\n padding[\"right\"] = tile_w - (arr_width % tile_w)\n if arr_height % tile_h == 0:\n padding[\"bottom\"] = 0\n else:\n padding[\"bottom\"] = tile_h - (arr_height % tile_h)\n info = dict(tile_shape=tile_shape, ntiles=ntiles, overlap=overlap, padding=padding)\n return tiles, info", "def get_tiles():\n\t\t\n\tcursor = get_cursor()\n\t\n\tcursor.execute(\"SELECT * FROM fitmeimages ORDER BY shade ASC, id ASC\")\n\treturn cursor.fetchall();", "def test_unbounded_tileset_image(self):\n\t\t# Create an 8x6 tileset image placeholder\n\t\tself.expected_tile_width = 8\n\t\tself.expected_tile_height = 6\n\t\tself.expected_rows = self.expected_tile_height\n\t\tself.expected_cols = self.expected_tile_width\n\n\t\tself.test_image = dummy_image(self.expected_width(), self.expected_height())\n\t\tself.test_image_grid = TextureGrid(ImageGrid(self.test_image, self.expected_rows, self.expected_cols))\n\n\t\t# Test creating a TilesetImage without specifying dimensions\n\t\tself.tileset_image = TilesetImage(self.test_image)\n\n\t\tself.assert_tileset_image('Rows and columns not specified.')", "def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),\n scale_rows_to_unit_interval=True,\n output_pixel_vals=True):\n\n assert len(img_shape) == 2\n assert len(tile_shape) == 2\n assert len(tile_spacing) == 2\n\n out_shape = [\n (ishp + tsp) * tshp - tsp\n for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing)\n ]\n\n if isinstance(X, tuple):\n assert len(X) == 4\n # Create an output numpy ndarray to store the image\n if output_pixel_vals:\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\n dtype='uint8')\n else:\n out_array = 
numpy.zeros((out_shape[0], out_shape[1], 4),\n dtype=X.dtype)\n\n #colors default to 0, alpha defaults to 1 (opaque)\n if output_pixel_vals:\n channel_defaults = [0, 0, 0, 255]\n else:\n channel_defaults = [0., 0., 0., 1.]\n\n for i in xrange(4):\n if X[i] is None:\n # if channel is None, fill it with zeros of the correct\n # dtype\n dt = out_array.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array[:, :, i] = numpy.zeros(\n out_shape,\n dtype=dt\n ) + channel_defaults[i]\n else:\n # use a recurrent call to compute the channel and store it\n # in the output\n out_array[:, :, i] = tile_raster_images(\n X[i], img_shape, tile_shape, tile_spacing,\n scale_rows_to_unit_interval, output_pixel_vals)\n return out_array\n\n else:\n # if we are dealing with only one channel\n H, W = img_shape\n Hs, Ws = tile_spacing\n\n # generate a matrix to store the output\n dt = X.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array = numpy.zeros(out_shape, dtype=dt)\n\n for tile_row in xrange(tile_shape[0]):\n for tile_col in xrange(tile_shape[1]):\n if tile_row * tile_shape[1] + tile_col < X.shape[0]:\n this_x = X[tile_row * tile_shape[1] + tile_col]\n if scale_rows_to_unit_interval:\n # if we should scale values to be between 0 and 1\n # do this by calling the `scale_to_unit_interval`\n # function\n this_img = scale_to_unit_interval(\n this_x.reshape(img_shape))\n else:\n this_img = this_x.reshape(img_shape)\n # add the slice to the corresponding position in the\n # output array\n c = 1\n if output_pixel_vals:\n c = 255\n out_array[\n tile_row * (H + Hs): tile_row * (H + Hs) + H,\n tile_col * (W + Ws): tile_col * (W + Ws) + W\n ] = this_img * c\n return out_array", "def make_tiles(input_path, save_path, dimension):\n for filename in os.listdir(input_path):\n if filename.endswith(\".png\"):\n image_path = input_path + filename\n\n width, height = Image.open(image_path).size\n\n # Ensures image is square.\n assert width == height\n # Ensures the image can be cut into the desired dimensions.\n assert width % dimension == 0\n n_tiles = (width / dimension) ** 2\n\n tiles = image_slicer.slice(image_path, n_tiles, save=False)\n image_slicer.save_tiles(\n tiles, directory=save_path, prefix=filename[0:2], format=\"png\"\n )", "def mbtiles(ctx, files, output, overwrite, title, description,\n layer_type, img_format, tile_size, zoom_levels, image_dump,\n num_workers, src_nodata, dst_nodata, resampling):\n output, files = resolve_inout(files=files, output=output,\n overwrite=overwrite)\n inputfile = files[0]\n\n logger = logging.getLogger('rio-mbtiles')\n\n with ctx.obj['env']:\n\n # Read metadata from the source dataset.\n with rasterio.open(inputfile) as src:\n\n validate_nodata(dst_nodata, src_nodata, src.profile.get('nodata'))\n base_kwds = {'dst_nodata': dst_nodata, 'src_nodata': src_nodata}\n\n if src_nodata is not None:\n base_kwds.update(nodata=src_nodata)\n\n if dst_nodata is not None:\n base_kwds.update(nodata=dst_nodata)\n\n # Name and description.\n title = title or os.path.basename(src.name)\n description = description or src.name\n\n # Compute the geographic bounding box of the dataset.\n (west, east), (south, north) = transform(\n src.crs, 'EPSG:4326', src.bounds[::2], src.bounds[1::2])\n\n # Resolve the minimum and maximum zoom levels for export.\n if zoom_levels:\n minzoom, maxzoom = map(int, zoom_levels.split('..'))\n else:\n zw = int(round(math.log(360.0 / (east - west), 2.0)))\n zh = int(round(math.log(170.1022 / (north - south), 2.0)))\n minzoom = min(zw, zh)\n maxzoom = max(zw, zh)\n\n 
logger.debug(\"Zoom range: %d..%d\", minzoom, maxzoom)\n\n # Parameters for creation of tile images.\n base_kwds.update({\n 'driver': img_format.upper(),\n 'dtype': 'uint8',\n 'nodata': 0,\n 'height': tile_size,\n 'width': tile_size,\n 'count': 3,\n 'crs': TILES_CRS})\n\n img_ext = 'jpg' if img_format.lower() == 'jpeg' else 'png'\n\n # Initialize the sqlite db.\n if os.path.exists(output):\n os.unlink(output)\n # workaround for bug here: https://bugs.python.org/issue27126\n sqlite3.connect(':memory:').close()\n\n conn = sqlite3.connect(output)\n cur = conn.cursor()\n cur.execute(\n \"CREATE TABLE tiles \"\n \"(zoom_level integer, tile_column integer, \"\n \"tile_row integer, tile_data blob);\")\n cur.execute(\n \"CREATE TABLE metadata (name text, value text);\")\n\n # Insert mbtiles metadata into db.\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"name\", title))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"type\", layer_type))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"version\", \"1.1\"))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"description\", description))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"format\", img_ext))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"bounds\", \"%f,%f,%f,%f\" % (west, south, east, north)))\n\n conn.commit()\n\n # Create a pool of workers to process tile tasks.\n pool = Pool(num_workers, init_worker,\n (inputfile, base_kwds, resampling), 100)\n\n # Constrain bounds.\n EPS = 1.0e-10\n west = max(-180 + EPS, west)\n south = max(-85.051129, south)\n east = min(180 - EPS, east)\n north = min(85.051129, north)\n\n # Initialize iterator over output tiles.\n tiles = mercantile.tiles(\n west, south, east, north, range(minzoom, maxzoom + 1))\n\n for tile, contents in pool.imap_unordered(process_tile, tiles):\n\n if contents is None:\n logger.info(\"Tile %r is empty and will be skipped\", tile)\n continue\n\n # MBTiles has a different origin than Mercantile/tilebelt.\n tiley = int(math.pow(2, tile.z)) - tile.y - 1\n\n # Optional image dump.\n if image_dump:\n img_name = '%d-%d-%d.%s' % (\n tile.x, tiley, tile.z, img_ext)\n img_path = os.path.join(image_dump, img_name)\n with open(img_path, 'wb') as img:\n img.write(contents)\n\n # Insert tile into db.\n cur.execute(\n \"INSERT INTO tiles \"\n \"(zoom_level, tile_column, tile_row, tile_data) \"\n \"VALUES (?, ?, ?, ?);\",\n (tile.z, tile.x, tiley, buffer(contents)))\n\n conn.commit()\n\n conn.close()\n # Done!", "def _generate_images(self, trace):\n images = []\n colors = []\n colors_by_shape = {}\n for board in trace:\n width = int(round((float(board.shape[1]) / board.shape[0]) * self._height))\n cellsize = width / board.shape[1] # cell size\n img = np.zeros((self._height, width, 3), dtype=np.uint8)\n\n tiles = {} # map from integer rep. 
of the tile to a shape\n for y in range(board.shape[0]):\n for x in range(board.shape[1]):\n cell = board[y,x]\n if cell not in tiles:\n tiles[cell] = (x, y, 1, 1) # x, y, w, h\n else:\n cur_x, cur_y, cur_w, cur_h = tiles[cell]\n if x >= cur_x + cur_w:\n cur_w = (x-cur_x) + 1\n if y >= cur_y + cur_h:\n cur_h = (y-cur_y) + 1\n tiles[cell] = (cur_x, cur_y, cur_w, cur_h)\n\n # Colors\n if len(colors_by_shape) == 0:\n for tid in tiles:\n shape = (tiles[tid][2], tiles[tid][3])\n if shape not in colors_by_shape:\n colors_by_shape[shape] = hex_to_rgb(random_unique_color(colors))\n colors.append(colors_by_shape[shape])\n\n for tid in tiles:\n x, y, w, h = tiles[tid]\n shape = (w,h)\n empty = board[y,x] == 0\n x, y, w, h = x*cellsize, y*cellsize, w*cellsize, h*cellsize\n # Draw a filled rectangle without color\n if not empty:\n cv2.rectangle(img, (x, y), (x+w, y+h), colors_by_shape[shape],-1)\n else:\n cv2.rectangle(img, (x, y), (x+w, y+h), [0,0,0], -1) #, 8)-\n # Draw a boundary\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 0), 2, 8)\n \n images.append(img)\n return images", "def build_tiles(img,tilefile,tilesize,options=[]):\n\tlevels=ceil(log(max(img.get_xsize(),img.get_ysize())/tilesize)/log(2.0))\n\t\n\ttf=file(tilefile,\"w\")\n\t\n\ttile_dict={}\n\tpos=0\n\timg2=img.copy()\n\txs,ys=img2.get_xsize(),img2.get_ysize()\n\tfor l in range(int(levels)):\n\t\trmin=img2.get_attr(\"mean\")-img2.get_attr(\"sigma\")*3.0\n\t\trmax=img2.get_attr(\"mean\")+img2.get_attr(\"sigma\")*3.0\n\t\tfor x in range(0,img2.get_xsize(),tilesize):\n\t\t\tfor y in range(0,img2.get_ysize(),tilesize):\n\t\t\t\ti=img2.get_clip(Region(x,y,tilesize,tilesize))\n\t\t\t\ti.set_attr(\"render_min\",rmin)\n\t\t\t\ti.set_attr(\"render_max\",rmax)\n\t\t\t\ti.set_attr(\"jpeg_quality\",70)\n\t\t\t\tfsp=\"tmpimg.%d.%03d.%03d.jpg\"%(l,x/tilesize,y/tilesize)\n\t\t\t\ti.write_image(fsp)\n\t\t\t\tsz=os.stat(fsp).st_size\n\t\t\t\ttile_dict[(l,x/tilesize,y/tilesize)]=(pos,sz)\n\t\t\t\tpos+=sz\n\t\timg2.process_inplace(\"math.meanshrink\",{\"n\":2})\n\t\n\t# This will produce 2 power spectrum images in the tile file\n\t# with scale factors -1 and -2\n\tif \"pspec\" in options :\n\t\tnx,ny=img.get_xsize()/512,img.get_ysize()/512\n\t\ta=EMData()\n\t\ta.set_size(512,512)\n\t\tif (ny>2 and nx>2) :\n\t\t\tfor y in range(1,ny-1):\n\t\t\t\tfor x in range(1,nx-1):\n\t\t\t\t\tc=img.get_clip(Region(x*512,y*512,512,512))\n\t\t\t\t\tc.process_inplace(\"normalize\")\n\t\t\t\t\tc.process_inplace(\"math.realtofft\")\n\t\t\t\t\tc.process_inplace(\"math.squared\")\n\t\t\t\t\ta+=c\n\t\t\ta.set_value_at(256,256,0,.01)\n\t\t\ta-=a.get_attr(\"minimum\")-a.get_attr(\"sigma\")*.01\n\t\t\ta.process_inplace(\"math.log\")\n\t\t\ta-=a.get_attr(\"minimum\")\n\t\t\ta.set_attr(\"render_min\",a.get_attr(\"minimum\")-a.get_attr(\"sigma\")*.1)\n\t\t\ta.set_attr(\"render_max\",a.get_attr(\"mean\")+a.get_attr(\"sigma\")*4.0)\n\t\t\ta.set_attr(\"jepg_quality\",80)\n\t\t\ta.write_image(\"/tmp/tmpimg.mrc\")\n\t\t\tfsp=\"tmpimg.jpg\"\n\t\t\ta.write_image(fsp)\n\t\t\tsz=os.stat(fsp).st_size\n\t\t\ttile_dict[(-1,0,0)]=(pos,sz)\n\t\t\tpos+=sz\n\t\n#\t\ttry:\n\t\t\timport matplotlib\n\t\t\tmatplotlib.use('Agg')\n\t\t\timport pylab\n\t\t\tmanager = pylab.get_current_fig_manager()\n\t\t\tapix=options[\"pspec\"]\n\t\t\tdx=1.0/(2.0*apix*256.0)\n\t\t\tx=pylab.arange(dx,dx*255.9,dx)\n\t\t\ty=a.calc_radial_dist(255,1,1,0)\t# radial power spectrum (log)\n\t\t\tpylab.figure(figsize=(8,6),dpi=96)\n\t\t\tpylab.axes([.08,.08,.9,.9], 
axisbg='w')\n\t\t\tpylab.plot(x,y)\n\t\t\tpylab.axis([0,dx*256,min(y),max(y)])\n\t\t\tpylab.xlabel(\"Spatial Freq. (1/A)\")\n\t\t\tpylab.ylabel(\"Log Intensity (10^x)\")\n#\t\t\tprint y\n\t\t\t\n\t\t\tfsp=\"tmpimg2.png\"\n\t\t\tpylab.savefig(fsp,dpi=96)\n\t\t\tsz=os.stat(fsp).st_size\n\t\t\ttile_dict[(-2,0,0)]=(pos,sz)\n\t\t\tpos+=sz\n\n#\t\texcept:\n#\t\t\tprint \"Unable to generate plot (need matplotlib)\"\n\t\t\t\n\t\n\tpickle.dump(tile_dict,tf)\n\t\n\tfor l in range(int(levels)):\n\t\tfor x in range(0,xs,tilesize):\n\t\t\tfor y in range(0,ys,tilesize):\n\t\t\t\tfsp=\"tmpimg.%d.%03d.%03d.jpg\"%(l,x/tilesize,y/tilesize)\n\t\t\t\ta=file(fsp,\"r\")\n\t\t\t\tb=a.read()\n\t\t\t\ta.close()\n\t\t\t\ttf.write(b)\n\t\t\t\tos.remove(fsp)\n\t\txs/=2\n\t\tys/=2\n\t\n\tif \"pspec\" in options :\n\t\tfor fsp in [\"tmpimg.jpg\",\"tmpimg2.png\"] :\n\t\t\ta=file(fsp,\"r\")\n\t\t\tb=a.read()\n\t\t\ta.close()\n\t\t\ttf.write(b)\n#\t\t\tos.remove(fsp)\n\t\n\ttf.close()", "def __init__(self, width, height):\n\t\tself.width = width\n\t\tself.height = height\n\t\tself.numTiles = width*height\n\t\tself.tiles = []\n\t\tfor i in range(0, width):\n\t\t\tfor j in range(0, height):\n\t\t\t\tself.tiles.append(Tile(i, j))", "def MakeCoordPlot(tiles, coords, image_size=10000, boarder_width=20):\n tile_size = tiles.shape[1]\n\n grid_coords = Cloud2Grid(\n coords, grid_dim=(image_size - 2 * tile_size), tile_size=tile_size\n )\n grid_coords = grid_coords + tile_size # for black boarder\n grid_image = Image.new(\"RGB\", (image_size, image_size))\n for i in range(len(tiles)): # paste each tile onto image\n tile = ColorTileBoarder(tiles[i], channel=0, boarder_width=2)\n tile = Image.fromarray(tiles[i])\n x, y = grid_coords[i, :]\n grid_image.paste(tile, (int(x), int(y)))\n coords[\"grid1\"] = grid_coords[:, 0] + tile_size // 2\n coords[\"grid2\"] = grid_coords[:, 1] + tile_size // 2\n return grid_image, coords", "def combine(filenames, size=None, number=None, dimensions=None):\n # some guards\n if filenames is None or len(filenames) == 0:\n print('Not enough files provided')\n return\n\n if number is None:\n number = 1\n\n # dimensions overrules number\n if dimensions is None:\n dimensions = Dimensions(1, number)\n else:\n number = dimensions.rows * dimensions.columns\n\n if size is None:\n size = Size(400, 200) \n\n # copy and shuffle\n shuffled = filenames[:]\n random.shuffle(shuffled)\n \n # pick one base image to fill the canvas\n base = shuffled[0]\n rest = shuffled[1:]\n\n # create grayscale versions\n images = map(image, shuffled)\n grayscales = list(map(make_grayscale, images))\n\n # create a new image and paste the grayscales\n combined = list()\n for _ in range(number):\n combined.append(combine_images(grayscales, size=size))\n\n show_collage(combined, dimensions)", "def create_image(self, image_location, width, height):\n tile_image = pygame.image.load(image_location).convert_alpha()\n # The tile is a square and the height is expected to be smaller than the width\n tile_width = width\n tile_height = height\n tile_image = pygame.transform.scale(tile_image, (tile_width, tile_height))\n\n # The self.image attribute expects a Surface, so we can manually create one and \"blit\" the tile image onto the surface (i.e. paint an image onto a surface).\n # We use list comprehension to quickly make the blits_data list of tuples (each tuple has the tile image, and the X and Y coordinates)\n # Don't know what list comprehension is? Go look it up on the Internet. 
That's what all professional software engineers do ;)\n image = pygame.Surface((width, height))\n blits_data = [(tile_image, (tile_width * i, 0)) for i in range(math.ceil(width / tile_width))]\n image.blits(blits_data)\n\n return image", "def find_tiles(self):\n lat1, lat2 = self.bbox.south, self.bbox.north\n lon1, lon2 = self.bbox.west, self.bbox.east\n # convert to geographic bounding box\n minlat, minlon = min(lat1, lat2), min(lon1, lon2)\n maxlat, maxlon = max(lat1, lat2), max(lon1, lon2)\n\n # convert to tile-space bounding box\n _, xmin, ymin = self.mercator(maxlat, minlon, self.zoom)\n _, xmax, ymax = self.mercator(minlat, maxlon, self.zoom)\n\n # generate a list of tiles\n xs, ys = range(xmin, xmax + 1), range(ymin, ymax + 1)\n tile_list = [(self.zoom, x, y) for (y, x) in product(ys, xs)]\n\n return tile_list", "def __init__(\n self,\n tile_id,\n image_path_list=None,\n image_sequence_duration=None,\n allowed_transport=tiledata.DEFAULT_TRANSPORTATION,\n ):\n # Each tile_id should map to a distinct Tile.\n self._tile_id = tile_id\n\n self._image_sequence_duration = image_sequence_duration\n\n # Represents the base terrain image (e.g. grass, water).\n self._image_list = []\n if image_path_list:\n for image_path in image_path_list:\n rendered_image = pygame.image.load(image_path).convert_alpha()\n if rendered_image:\n self._image_list.append(rendered_image)\n else:\n LOGGER.error(\n \"Error rendering tile image %s\",\n image_path\n )\n sys.exit(2)\n else:\n # Use default tile image.\n image_path = imagepaths.TILE_DEFAULT_PATH\n rendered_image = pygame.image.load(\n image_path\n ).convert_alpha()\n\n if rendered_image:\n self._image_list.append(rendered_image)\n else:\n LOGGER.error(\n \"Error rendering tile image %s\",\n image_path\n )\n sys.exit(2)\n\n self._allowed_transport = allowed_transport\n\n self._individual_image_duration = None\n if self._image_sequence_duration and self._image_list:\n self._individual_image_duration = int(\n self._image_sequence_duration / len(self._image_list)\n )", "def put_multi(self, tag, tiles, ttl=0):\n\n for tile in tiles:\n self.put(tag, tile, ttl=ttl)", "def __init__(self, width, height, tilesize=256, tileformat='jpg'):\n\n self.tilesize = tilesize\n self.tileformat = tileformat\n imagesize = (width, height)\n tiles = (math.ceil(width / tilesize), math.ceil(height / tilesize))\n\n # Size (in tiles) for each tier of pyramid.\n self.tierSizeInTiles = []\n self.tierSizeInTiles.append(tiles)\n\n # Image size in pixels for each pyramid tierself\n self.tierImageSize = []\n self.tierImageSize.append(imagesize)\n\n while (imagesize[0] > tilesize or imagesize[1] > tilesize):\n imagesize = (math.floor(imagesize[0] / 2), math.floor(imagesize[1] / 2))\n tiles = (math.ceil(imagesize[0] / tilesize), math.ceil(imagesize[1] / tilesize))\n self.tierSizeInTiles.append(tiles)\n self.tierImageSize.append(imagesize)\n\n self.tierSizeInTiles.reverse()\n self.tierImageSize.reverse()\n\n # Depth of the Zoomify pyramid, number of tiers (zoom levels)\n self.numberOfTiers = len(self.tierSizeInTiles)\n\n # Number of tiles up to the given tier of pyramid.\n self.tileCountUpToTier = []\n self.tileCountUpToTier[0] = 0\n for i in range(1, self.numberOfTiers+1):\n self.tileCountUpToTier.append(\n self.tierSizeInTiles[i-1][0] * self.tierSizeInTiles[i-1][1] +\n self.tileCountUpToTier[i-1]\n )", "def construct_image(imgs):\n\n # todo fill missing pieces and\n\n if len(imgs) == 0:\n return None\n # taking the first\n w, h = imgs[0][1].size\n img_array = order_2d(imgs)\n x_count = 
len(img_array[0])\n y_count = len(img_array)\n height = h * y_count\n width = w * x_count\n new_im = Image.new('RGB', (width, height))\n for y in range(y_count):\n for x in range(x_count):\n _, im = img_array[y][x]\n new_im.paste(im, (x * w, y * h))\n return new_im", "def render_tiles(self, tiles):\n for row in tiles:\n for tile in row:\n if tile is not None:\n if tile.height < 0:\n color = (0, 100, 0)\n else:\n z = max(0, tile.height)\n color = tuple([z * 255] * 3)\n self.surface.set_at((tile.x, tile.y), color)", "def test_tiled():\n size = [25, 25]\n img = Image.new('RGB', (10, 10))\n img.putpixel((5, 5), (0, 255, 0))\n\n parameters = {'data': [img], 'size': size}\n\n tiled = images.tiled(parameters)\n\n assert_equal(tiled.size, tuple(size))\n assert_equal(tiled.getpixel((5, 5)), (0, 255, 0))\n assert_equal(tiled.getpixel((15, 5)), (0, 255, 0))", "def img_to_tiles(cls, tiff_path, region, res, tile, tile_date_path, img_format, mp):\n\n # Get metadata from original image\n metadata = TiffMetadata(tiff_path)\n\n WIDTH, HEIGHT = region.calculate_width_height(res)\n ultra_large = False\n if WIDTH * HEIGHT > 2 * Image.MAX_IMAGE_PIXELS:\n ultra_large = True\n\n # Use the following dictionary to get the coordinates of each tile\n geoTran_d = TileUtils.getGeoTransform(tiff_path)\n\n # Check for valid tiling dimensions\n if (tile.width > WIDTH or tile.height > HEIGHT):\n raise argparse.ArgumentTypeError(\"Tiling dimensions greater than image dimensions\")\n\n # Determine the number of tiles per row and column\n if tile.handling == Handling.discard_incomplete_tiles:\n num_rows = (HEIGHT - tile.height * tile.overlap) // (tile.height * (1 - tile.overlap))\n num_cols = (WIDTH - tile.width * tile.overlap) // (tile.width * (1 - tile.overlap))\n else:\n num_rows = math.ceil((HEIGHT - tile.height * tile.overlap) / (tile.height * (1 - tile.overlap)))\n num_cols = math.ceil((WIDTH - tile.width * tile.overlap) / (tile.width * (1 - tile.overlap)))\n\n num_iterations = num_rows * num_cols\n \n # Find the pixel coordinate extents of each tile to be generated\n print(\"Gathering tiling information...\", end=\"\", flush=True)\n if mp:\n with Pool(processes=NUM_CORES) as pool:\n args = zip(repeat((metadata, tile, WIDTH, HEIGHT, geoTran_d, tile_date_path, num_rows, num_cols)), list(range(num_iterations)))\n pixel_coords = pool.map(getTilingSplitCoordsMP, args)\n else:\n pixel_coords = []\n for index in range(num_iterations):\n pixel_coords.append(getTilingSplitCoordsTuple(metadata,tile, WIDTH, HEIGHT, geoTran_d, tile_date_path, num_rows, num_cols, index))\n print(\"done!\")\n\n if mp:\n print(\"Generating {} tiles using {} processes...\".format(len(pixel_coords), NUM_CORES), flush=True)\n else:\n print(\"Generating {} tiles sequentially...\".format(len(pixel_coords)), flush=True)\n\n if ultra_large: \n # Create the intermediate tiles\n inter_dir, img_width, img_height = TileUtils.img_to_intermediate_images(tiff_path, tile, WIDTH, HEIGHT, metadata.date, img_format)\n\n # Add each coordinate to its proper list\n intermediate_files = [f for f in os.listdir(inter_dir) if f.endswith(img_format)]\n\n # Get the tiling information for all intermediate tiles\n intermediate_info = TileUtils.getIntermediateTilingInfo(tile, pixel_coords, WIDTH, HEIGHT, img_width, img_height, intermediate_files)\n\n # Tile the complete images\n print(\"\\tTiling from complete images\")\n for single_inter_imgs in tqdm(intermediate_info[0]):\n filename = single_inter_imgs[0][0]\n inter_metadata = IntermediateMetadata(filename)\n\n img_path = 
os.path.join(inter_dir, filename)\n src = Image.open(img_path)\n img_arr = np.array(src)\n\n if mp:\n # Create a shared array\n X_shape = img_arr.shape\n X = RawArray('B', X_shape[0] * X_shape[1] * X_shape[2])\n\n # Wrap shared array as numpy array\n X_np = np.frombuffer(X, dtype='uint8').reshape(X_shape)\n\n # Copy image to the shared array\n np.copyto(X_np, img_arr)\n \n # Use multiprocessing to tile the numpy array\n with Pool(processes=NUM_CORES, initializer=init_worker, initargs=(X, X_shape, None, None)) as pool:\n multi = [pool.apply_async(TileUtils.generate_tile, args=(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format,), kwds={\"inter_x\":(x - inter_metadata.start_x), \"inter_y\":(y - inter_metadata.start_y)}) for (filename, x, y, done_x, done_y, path) in single_inter_imgs]\n f = [p.get() for p in multi]\n pool.close()\n pool.join()\n else: \n for filename, x, y, done_x, done_y, path in single_inter_imgs:\n TileUtils.generate_tile(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format, inter_x=(x - inter_metadata.start_x), inter_y=(y - inter_metadata.start_y), img_arr=img_arr)\n\n # Close the image\n src.close()\n # Tile in between two images\n print(\"\\tTiling between two images\")\n if mp:\n with Pool(processes=NUM_CORES) as pool:\n args = zip(repeat((tile.width, tile.height, inter_dir, img_format)), intermediate_info[1])\n result = list(tqdm(pool.imap(processDoublesMP, args), total=len(intermediate_info[1])))\n else:\n for double_inter_imgs in tqdm(intermediate_info[1]):\n processDoublesTuple(tile.width, tile.height, inter_dir, img_format, double_inter_imgs)\n \n # Tile in between four images\n print(\"\\tTiling between four images\")\n if mp:\n # Use half as many processes as cores to ensure not running out of available mem and getting stuck\n with Pool(processes=(NUM_CORES // 2)) as pool:\n args = zip(repeat((tile.width, tile.height, inter_dir, img_format)), intermediate_info[2])\n result = list(tqdm(pool.imap(processQuadsMP, args), total=len(intermediate_info[2])))\n else:\n for quad_inter_imgs in tqdm(intermediate_info[2]):\n processQuadsTuple(tile.width, tile.height, inter_dir, img_format, quad_inter_imgs)\n shutil.rmtree(inter_dir)\n else: \n # Open image as a numpy array in order to tile from the array\n src = Image.open(tiff_path)\n img_arr = np.array(src)\n\n if mp:\n # Create a shared array\n X_shape = img_arr.shape\n X = RawArray('B', X_shape[0] * X_shape[1] * X_shape[2])\n\n # Wrap shared array as numpy array\n X_np = np.frombuffer(X, dtype='uint8').reshape(X_shape)\n\n # Copy image to the shared array\n np.copyto(X_np, img_arr)\n\n # Use multiprocessing to tile the numpy array\n with Pool(processes=NUM_CORES, initializer=init_worker, initargs=(X, X_shape, None, None)) as pool:\n multi = [pool.apply_async(TileUtils.generate_tile, args=(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format)) for (x, y, done_x, done_y, path) in pixel_coords]\n f = [p.get() for p in tqdm(multi)]\n pool.close()\n pool.join()\n else:\n for x, y, done_x, done_y, path in tqdm(pixel_coords):\n TileUtils.generate_tile(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format, img_arr=img_arr)\n \n # Close the image\n src.close()\n print(\"done!\")", "def sample_tiles(self, fc, image_spec, export_radius, tags=None):\n image_spec, output_bands = self._get_image_spec_helper(image_spec, tags)\n fc = add_imagery(fc, image_spec, output_size=export_radius)\n return fc, output_bands", "def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], 
X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling", "def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling", "def __createTiles(self, length, width, height):\n\n rectangles = []\n centrePoints = []\n \n # Defines the dimensions required to fit all tiles\n totalHeight = length * height\n totalWidth = length * width\n \n # Go through all tiles\n y = length\n while y < totalHeight + length:\n\n x = length\n while x < totalWidth + length:\n # Creates a Rect object\n rectangle = pygame.Rect(x, y, length, length)\n rectangles.append(rectangle)\n\n # Calculates the tile's centre point.\n centrePoint = (math.floor(x + length/2), math.floor(y + length/2))\n centrePoints.append(centrePoint)\n\n x += length\n y += length\n\n return rectangles, centrePoints", "def concat_images(images, axis=0):\n # Get the width and the heights\n widths, heights = zip(*(i.size for i in images))\n\n # Initalize an offset to append the next image to the end of the previous\n offset = 0\n\n # Concatenate along the lines\n if axis == 1:\n # Get the width of the final image and the height\n max_width = max(widths)\n total_height = sum(heights)\n # Initalize the final image with the first subimage\n new_im = Image.new(images[0].mode, (max_width, total_height))\n\n # Append all consecutive images\n for im in images:\n new_im.paste(im, (0, offset))\n offset += im.size[1]\n\n # Concatenate along the columns\n else:\n # Get the width and the height of the final image\n total_width = sum(widths)\n max_height = max(heights)\n # Initalize the final image with the first subimage\n new_im = Image.new(images[0].mode, (total_width, max_height))\n\n # Append all consecutive images\n for im in images:\n new_im.paste(im, (offset, 0))\n offset += im.size[0]\n\n return new_im", "def AppendImages(im1, im2):\r\n im1cols, im1rows = im1.size\r\n im2cols, im2rows = im2.size\r\n im3 = Image.new('RGB', (im1cols+im2cols, max(im1rows,im2rows)))\r\n im3.paste(im1,(0,0))\r\n im3.paste(im2,(im1cols,0))\r\n return im3", "def make_board(self):\n http = urllib3.PoolManager()\n r = http.request('GET', 'http://www.cse.msu.edu/~ruppmatt/itm891/tiles.pickle')\n tiles = pickle.loads(r.data)\n self.assets = tiles\n self.gameboard = Image.new('RGBA', (64*(self.world_width+2), 64*(self.world_height+2)))\n # Laydown land\n for c in range(0,self.world_width):\n for r in range(0, self.world_height):\n x = (c+1)*64\n y = (r+1)*64\n tile_ndx = np.random.choice(len(tiles['land']))\n self.gameboard.paste(tiles['land'][tile_ndx], (x,y)) \n # Laydown water\n for c in range(0,self.world_width):\n x = (c+1)*64\n yy = (self.world_height+1)*64\n self.gameboard.paste(tiles['water']['edge_north'], (x,0))\n self.gameboard.paste(tiles['water']['edge_south'], (x, yy))\n for r in range(0,self.world_height):\n y = (r+1)*64\n xx = (self.world_width+1)*64\n self.gameboard.paste(tiles['water']['edge_west'], (0,y))\n self.gameboard.paste(tiles['water']['edge_east'], (xx,y))\n self.gameboard.paste(tiles['water']['corner_nw'], (0,0))\n self.gameboard.paste(tiles['water']['corner_sw'], (0,(self.world_height+1)*64))\n 
self.gameboard.paste(tiles['water']['corner_ne'], ((self.world_width+1)*64,0))\n self.gameboard.paste(tiles['water']['corner_se'], ((self.world_width+1)*64,(self.world_height+1)*64))\n \n # Some land lines\n draw = ImageDraw.Draw(self.gameboard)\n for c in range(0,self.world_width-1):\n y_1 = 64\n y_2 = 64*(self.world_height+1)\n x = (2+c)*64\n draw.line([(x,y_1),(x,y_2)], fill='white', width=1)\n for r in range(0,self.world_height-1):\n y = (2+r)*64\n x_1= 64\n x_2 = 64 * (self.world_width+1)\n draw.line([(x_1,y),(x_2,y)], fill='white', width=1)\n return", "def tiles_to_images(wfc_ns, tile_grid, tile_catalog, tile_size, visualize=False, partial=False, grid_count=None):\n new_img = np.zeros((tile_grid.shape[0] * tile_size, tile_grid.shape[1] * tile_size, wfc_ns.channels), dtype=np.int64)\n if partial and (len(tile_grid.shape) > 2):\n for i in range(tile_grid.shape[0]):\n for j in range(tile_grid.shape[1]):\n for u in range(wfc_ns.tile_size):\n for v in range(wfc_ns.tile_size):\n pixel_merge_list = []\n for k in range(tile_grid.shape[2]):\n tile = tile_grid[i,j,k]\n ## If we want to display a partial pattern, it is helpful to\n ## be able to show empty cells. Therefore, in visualize mode,\n ## we use -1 as a magic number for a non-existant tile.\n pixel = None#[200, 0, 200]\n #print(tile)\n if (visualize) and ((-1 == tile) or (-2 == tile)):\n if (-1 == tile):\n pixel = [200, 0, 200]\n if 0 == (i + j) % 2:\n pixel = [255, 0, 255]\n else:\n pixel = [0, 255, 255]\n else:\n if (WFC_PARTIAL_BLANK != tile) and (WFC_NULL_VALUE != tile): # TODO: instead of -3, use MaskedArrays\n pixel = tile_catalog[tile][u,v]\n if not(pixel is None):\n pixel_merge_list.append(pixel)\n if len(pixel_merge_list) == 0:\n if 0 == (i + j) % 2:\n pixel_merge_list.append([255, 0, 255])\n else:\n pixel_merge_list.append([0, 172, 172])\n \n if len(pixel_merge_list) > 0:\n pixel_to_add = pixel_merge_list[0]\n if len(pixel_merge_list) > 1:\n pixel_to_add = [round(sum(x) / len(pixel_merge_list)) for x in zip(*pixel_merge_list)]\n try:\n while (len(pixel_to_add) < wfc_ns.channels):\n pixel_to_add.append(255)\n new_img[(i*wfc_ns.tile_size)+u, (j*wfc_ns.tile_size)+v] = pixel_to_add\n except TypeError as e:\n wfc_logger.warning(e)\n wfc_logger.warning(\"Tried to add {} from {}\".format(pixel_to_add, pixel_merge_list))\n else:\n for i in range(tile_grid.shape[0]):\n for j in range(tile_grid.shape[1]):\n tile = tile_grid[i,j]\n for u in range(wfc_ns.tile_size):\n for v in range(wfc_ns.tile_size):\n ## If we want to display a partial pattern, it is helpful to\n ## be able to show empty cells. 
Therefore, in visualize mode,\n ## we use -1 as a magic number for a non-existant tile.\n pixel = [200, 0, 200]\n #print(f\"tile: {tile}\")\n if (visualize) and ((-1 == tile) or (-2 == tile)):\n if (-1 == tile):\n if 0 == (i + j) % 2:\n pixel = [255, 0, 255]\n if (-2 == tile):\n pixel = [0, 255, 255]\n else:\n if (WFC_PARTIAL_BLANK != tile):\n pixel = tile_catalog[tile][u,v]\n # Watch out for images with more than 3 channels!\n new_img[(i*wfc_ns.tile_size)+u, (j*wfc_ns.tile_size)+v] = np.resize(pixel, new_img[(i*wfc_ns.tile_size)+u, (j*wfc_ns.tile_size)+v].shape)\n logging.debug('Output image shape is', new_img.shape)\n return new_img", "def combineImages(path=None, imgfiles=None, cols=3, size=300):\n\n font = ImageFont.truetype(\"Arial.ttf\", 15)\n x=size\n w=20\n i=0; j=0\n if imgfiles == None:\n imgfiles = findFiles(path, 'png')\n width = cols*(x+w)\n height = int(math.ceil(float(len(imgfiles))/cols)*x)\n new_im = Image.new('RGBA', (width, height), 'white')\n for f in imgfiles:\n name = os.path.basename(f).split('.')[0]\n if not os.path.exists(f):\n continue\n im = Image.open(f)\n im.thumbnail((x,x))\n new_im.paste(im, (i*x+w,j*x+w))\n draw = ImageDraw.Draw(new_im)\n draw.text((i*x+w,j*x+w), name, (0,0,0), font=font)\n i+=1\n if i>=cols:\n i=0; j+=1\n #new_im.show()\n path = os.path.split(imgfiles[0])[0]\n new_im.save(os.path.join(path,\"summary.png\"))\n return", "def compare_images(image1, image2, method='diff', *, n_tiles=(8, 8)):\n if image1.shape != image2.shape:\n raise ValueError('Images must have the same shape.')\n\n img1 = img_as_float(image1)\n img2 = img_as_float(image2)\n\n if method == 'diff':\n comparison = np.abs(img2 - img1)\n elif method == 'blend':\n comparison = 0.5 * (img2 + img1)\n elif method == 'checkerboard':\n shapex, shapey = img1.shape\n mask = np.full((shapex, shapey), False)\n stepx = int(shapex / n_tiles[0])\n stepy = int(shapey / n_tiles[1])\n for i, j in product(range(n_tiles[0]), range(n_tiles[1])):\n if (i + j) % 2 == 0:\n mask[i * stepx:(i + 1)*stepx, j * stepy:(j + 1) * stepy] = True\n comparison = np.zeros_like(img1)\n comparison[mask] = img1[mask]\n comparison[~mask] = img2[~mask]\n else:\n raise ValueError('Wrong value for `method`. '\n 'Must be either \"diff\", \"blend\" or \"checkerboard\".')\n return comparison", "def stich(data, title=None):\n # Get name, list of tiles, width and height\n name = data[\"levels\"][0][\"name\"] \n tiles = data[\"levels\"][0][\"tiles\"]\n width = data[\"levels\"][0][\"width\"]\n height = data[\"levels\"][0][\"height\"]\n\n # Create the directory to place all the downloaded tiles in\n if title: #if title provided, name directory based on that\n dirname = title\n else: #if title not provided, generate a name\n dirname = name + str(width) + str(height)\n os.makedirs(dirname, exist_ok=True)\n os.chdir(dirname)\n\n #Create the empty image based on dimensions\n result = Image.new('RGB', (width, height))\n tile_size = None \n\n # actually get the tiles\n for i in tiles:\n image = get_tile(i['url']) #download image\n if not tile_size:\n tile_size = image.size[0] # on the first tile get the image size\n result.paste(im=image, box=(i['x'] * tile_size, i['y'] * tile_size)) # each tile has a number which isn't\n # it's cooridnate in pixels but it's order. \n # To get pixel coordinate just multiply by the size of each tile\n result.save('final.jpeg') # save file in directory\n os.chdir(os.path.join( os.path.dirname( __file__ ), '..' 
)) # then navigate back up to the base directory", "def montage(images, w_sub, h_sub, step):\n    target = Image.new('RGB', (w_sub*step, h_sub*step))\n    left = 0\n    right = w_sub\n    for i in range(len(images)):\n        top=(i//step)*h_sub\n        target.paste(images[i], (left, top, right, top+h_sub))\n        if(i//step < (i+1)//step):#Check if this row is done\n            left = 0#Reset the position in a row\n            right = w_sub\n        else: #Next picture\n            left += w_sub\n            right += w_sub\n    quality_value = 100\n    return target", "def __init__(self, width, height, tilesize = 256, tileformat='jpg'):\n\n        self.tilesize = tilesize\n        self.tileformat = tileformat\n        imagesize = (width, height)\n        tiles = ( math.ceil( width / tilesize ), math.ceil( height / tilesize ) )\n\n        # Size (in tiles) for each tier of pyramid.\n        self.tierSizeInTiles = []\n        self.tierSizeInTiles.append( tiles )\n\n        # Image size in pixels for each pyramid tier\n        self.tierImageSize = []\n        self.tierImageSize.append( imagesize )\n\n        while (imagesize[0] > tilesize or imagesize[1] > tilesize ):\n            imagesize = (math.floor( imagesize[0] / 2 ), math.floor( imagesize[1] / 2) )\n            tiles = ( math.ceil( imagesize[0] / tilesize ), math.ceil( imagesize[1] / tilesize ) )\n            self.tierSizeInTiles.append( tiles )\n            self.tierImageSize.append( imagesize )\n\n        self.tierSizeInTiles.reverse()\n        self.tierImageSize.reverse()\n\n        # Depth of the Zoomify pyramid, number of tiers (zoom levels)\n        self.numberOfTiers = len(self.tierSizeInTiles)\n\n        # Number of tiles up to the given tier of pyramid.\n        self.tileCountUpToTier = []\n        self.tileCountUpToTier.append(0)\n        for i in range(1, self.numberOfTiers+1):\n            self.tileCountUpToTier.append(\n                self.tierSizeInTiles[i-1][0] * self.tierSizeInTiles[i-1][1] + self.tileCountUpToTier[i-1]\n            )", "def add_tile(self, input_name, multiples, name=None):\n        return self._build_op('Tile', [input_name, multiples], name=name)", "def _mergeTiles(self, base, tile, x, y):\n        # Replace non blank pixels, aggregating opacity appropriately\n        x = int(round(x))\n        y = int(round(y))\n        if base is None and not x and not y:\n            return tile\n        if base is None:\n            base = np.zeros((0, 0, tile.shape[2]), dtype=tile.dtype)\n        base, tile = _makeSameChannelDepth(base, tile)\n        if base.shape[0] < tile.shape[0] + y:\n            vfill = np.zeros(\n                (tile.shape[0] + y - base.shape[0], base.shape[1], base.shape[2]),\n                dtype=base.dtype)\n            if base.shape[2] == 2 or base.shape[2] == 4:\n                vfill[:, :, -1] = 1\n            base = np.vstack((base, vfill))\n        if base.shape[1] < tile.shape[1] + x:\n            hfill = np.zeros(\n                (base.shape[0], tile.shape[1] + x - base.shape[1], base.shape[2]),\n                dtype=base.dtype)\n            if base.shape[2] == 2 or base.shape[2] == 4:\n                hfill[:, :, -1] = 1\n            base = np.hstack((base, hfill))\n        if base.flags.writeable is False:\n            base = base.copy()\n        base[y:y + tile.shape[0], x:x + tile.shape[1], :] = tile\n        return base", "def saveTiles(z, x, y, ntiles, mapname, image, suffix = 'png', imgtype = None):\n    for dx in range(0, ntiles):\n        tilex = x*ntiles + dx\n        ensureDirExists(getTileDir(mapname, z, tilex))\n        for dy in range(0, ntiles): \n            tiley = y*ntiles + dy\n            offsetx = BORDER_WIDTH + dx*TILE_SIZE\n            offsety = BORDER_WIDTH + dy*TILE_SIZE\n            view = image.view(offsetx, offsety, TILE_SIZE, TILE_SIZE)\n            if imgtype:\n                view.save(getTilePath(mapname, z, tilex, tiley, suffix), imgtype)\n            else:\n                view.save(getTilePath(mapname, z, tilex, tiley, suffix))", "def __init__tiles__(self):\n        return [[Tiles(i, j, Tiles.closed) for j in range(self.cols)] for i in range(self.rows)]", "def merge_images(sources, targets):\n    batch_size, channels, h, w = 
sources.shape\n rows = int(np.sqrt(batch_size))\n cols = int(math.ceil(batch_size / rows))\n grid = np.zeros([channels, rows*h, cols*w*2], dtype=np.uint8)\n for idx, (s, t) in enumerate(zip(sources, targets)):\n i = idx // cols\n j = idx % cols\n grid[:, i*h:(i+1)*h, (j*2)*h:(j*2+1)*h] = s\n grid[:, i*h:(i+1)*h, (j*2+1)*h:(j*2+2)*h] = t\n grid = grid.transpose(1, 2, 0)\n return grid", "def copy_tiles(self):\n \n return self.tiles", "def load_image_parts(self, filename, margin, spacing, tile_width, tile_height, colorkey=None): #-> [images]\n raise NotImplementedError(u'This should be implemented in a inherited class')", "def load_image_parts(self, filename, margin, spacing, tile_width, tile_height, colorkey=None): #-> [images]\n raise NotImplementedError(u'This should be implemented in a inherited class')", "def split_image_into_number_of_tiles(\n arr: Image, x_ntiles: int, y_ntiles: int, overlap: int\n):\n img_width, img_height = arr.shape[-1], arr.shape[-2]\n tile_w = img_width // x_ntiles\n tile_h = img_height // y_ntiles\n return split_image_into_tiles_of_size(arr, tile_w, tile_h, overlap)", "def draw_grid(self, tile_img, tiles):\n #debug_print(\"drawing level\", data)\n img = Surface((self.xsize * SIZE, self.ysize * SIZE))\n for pos, char in self:\n rect = get_tile_rect(pos)\n img.blit(tile_img, rect, tiles[char])\n return img", "def stack_images(cls, imgs, horizontal=True):\n assert isinstance(imgs, (list, tuple))\n if horizontal:\n H = max([i.shape[0] for i in imgs])\n W = sum([i.shape[1] for i in imgs])\n stacked_img = np.full((H, W, 3), 255, dtype=np.uint8)\n offset_w = 0\n for i in imgs:\n stacked_img[:i.shape[0], offset_w:offset_w +\n i.shape[1], :] = i.data\n offset_w += i.shape[1]\n else:\n H = sum([i.shape[0] for i in imgs])\n W = max([i.shape[1] for i in imgs])\n stacked_img = np.full((H, W, 3), 255, dtype=np.uint8)\n offset_h = 0\n for i in imgs:\n stacked_img[offset_h:offset_h +\n i.shape[0], :i.shape[1], :] = i.data\n offset_h += i.shape[0]\n\n return cls(stacked_img)", "def image_to_tiles(img, tile_size):\n padding_argument = [(0,0),(0,0),(0,0)]\n for input_dim in [0,1]:\n padding_argument[input_dim] = (0, (tile_size - img.shape[input_dim]) % tile_size)\n img = np.pad(img, padding_argument, mode='constant')\n tiles = img.reshape((img.shape[0]//tile_size, \n tile_size,\n img.shape[1]//tile_size,\n tile_size,\n img.shape[2]\n )).swapaxes(1,2)\n return tiles", "def paste_chaos(image, tiles, size, shadow_off_set=(30, 30)):\n # image_all = Image.new('RGB', image.size, 0xffffff)\n image_all = image\n lst = range(len(tiles))\n random.shuffle(lst)\n fragment_size = (image.size[0] / size[0], image.size[1] / size[1])\n print 'tiles size %d X %d' % fragment_size\n print 'number of tiles one iteration: %d' % len(lst)\n for i in lst:\n im = Image.open(tiles[i])\n degree = random.randint(-20, 20)\n im = thumbnail(rotate_image(drop_shadow(add_frame(im), shadow_off_set), degree), (fragment_size[0] * 3 / 2, fragment_size[1] * 3 / 2))\n x = i % size[0] * fragment_size[0] + random.randrange(-fragment_size[0] / 2, fragment_size[0] / 2)\n y = i / size[0] * fragment_size[1] + random.randrange(-fragment_size[1] / 2, fragment_size[1] / 2)\n # print x, y\n image_all.paste(im, (x, y), im)\n return image_all", "def merge_images(images, axis=0):\n assert axis in [0, 1]\n total_len = sum(map(lambda i: i.size[axis], images))\n if axis == 0:\n new_shape = (total_len, images[0].size[1])\n step = images[0].size[0]\n else:\n new_shape = (images[0].size[0], total_len)\n step = images[0].size[1]\n\n canvas 
= Image.new('RGB', new_shape)\n\n shift = 0\n for image in images:\n if axis == 0:\n canvas.paste(image, (shift, 0))\n else:\n canvas.paste(image, (0, shift))\n \n shift += image.size[axis]\n\n return canvas", "def __init__(self, width, height):\n self.clean_tiles = []\n self.width = width\n self.height = height", "def build_tiles(cls):\n\n LOGGER.debug(\"Building tiles\")\n\n for tile_id in tiledata.TILE_DATA:\n if not Tile.tile_factory(tile_id):\n LOGGER.error(\"Could not construct tile with ID %d\", tile_id)\n sys.exit(1)", "def __init__(self, width, height):\n self.width = width\n self.height = height\n self.numTiles = width * height\n\n for w in range(0,width):\n for h in range(0,height):\n #NOTE--float width,height as tuple keys don't work?!\n #so could not use Position(), since those x,y's can be floats\n #tuples of ints (w,h) could be used\n self.tiles[(w,h)] = 0 # value of key tuple (w,h) = 0 = dirty (or vice versa, 1 = clean)\n #self.printTiles()\n #raise NotImplementedError", "def split_image_with_bboxes(bboxes, image, tiles=4):\n\n if tiles == 0:\n return {(0, 0): {\"image\": image, \"bboxes\": bboxes}}\n assert tiles % 2 == 0, \"Error in splitting images. Uneven number of images requested.\"\n\n split = tiles / 2\n\n height, width, *_ = image.shape\n\n new_height = height / split\n new_width = width / split\n\n tiles = {}\n\n tile_height = new_height\n\n for row in range(int(split)):\n tile_width = new_width\n for col in range(int(split)):\n\n # Create image with true values on tile\n canvas = np.zeros_like(image)\n tile_start = (int(tile_height-new_height), int(tile_width-new_width))\n tile_end = (int(tile_height), int(tile_width))\n canvas[tile_start[0]:tile_end[0], tile_start[1]:tile_end[1]] = 1\n\n new_bboxes = []\n for bbox in bboxes:\n\n xmin, ymin, xmax, ymax = bbox\n\n # Overlap of image tile and bbox\n bbox_image = np.zeros_like(image)\n bbox_image[ymin:ymax, xmin:xmax] = 1\n\n overlap = np.logical_and(canvas, bbox_image)\n\n if np.sum(overlap) < 1:\n continue\n\n overlap_index = np.argwhere(overlap)\n\n overlap_xmin, overlap_ymin = overlap_index[0][1], overlap_index[0][0]\n overlap_xmax, overlap_ymax = overlap_index[-1][1]+1, overlap_index[-1][0]+1\n\n new_xmin = overlap_xmin - col * new_width\n new_ymin = overlap_ymin - row * new_height\n new_xmax = overlap_xmax - col * new_width\n new_ymax = overlap_ymax - row * new_height\n\n new_bbox = (new_xmin, new_ymin, new_xmax, new_ymax)\n\n new_bboxes.append(new_bbox)\n\n cropped_image = image[tile_start[0]:tile_end[0], tile_start[1]:tile_end[1]]\n tiles[(row, col)] = {\"image\": cropped_image, \"bboxes\": new_bboxes}\n\n tile_width = tile_width + new_width\n tile_height = tile_height + new_height\n\n return tiles", "def pack_images(images, rows, cols):\n shape = tf.shape(images)\n width = shape[-3]\n height = shape[-2]\n depth = shape[-1]\n images = tf.reshape(images, (-1, width, height, depth))\n batch = tf.shape(images)[0]\n rows = tf.minimum(rows, batch)\n cols = tf.minimum(batch // rows, cols)\n images = images[:rows * cols]\n images = tf.reshape(images, (rows, cols, width, height, depth))\n images = tf.transpose(images, [0, 2, 1, 3, 4])\n images = tf.reshape(images, [1, rows * width, cols * height, depth])\n return images", "def pack_images(images, rows, cols):\n shape = tf.shape(input=images)\n width = shape[-3]\n height = shape[-2]\n depth = shape[-1]\n images = tf.reshape(images, (-1, width, height, depth))\n batch = tf.shape(input=images)[0]\n rows = tf.minimum(rows, batch)\n cols = tf.minimum(batch // rows, 
cols)\n images = images[:rows * cols]\n images = tf.reshape(images, (rows, cols, width, height, depth))\n images = tf.transpose(a=images, perm=[0, 2, 1, 3, 4])\n images = tf.reshape(images, [1, rows * width, cols * height, depth])\n return images", "def get_image_tiles_tensor(image, label, image_path, patch_width):\n tiles_before_reshape = tensorflow.extract_image_patches(\n tensorflow.expand_dims(image, dim=0), [1, patch_width, patch_width, 1],\n [1, patch_width, patch_width, 1], [1, 1, 1, 1], 'VALID')\n tiles = tensorflow.reshape(tiles_before_reshape, [-1, patch_width, patch_width, 1])\n\n labels = tensorflow.tile(tensorflow.expand_dims(label, dim=0), [tensorflow.shape(tiles)[0], 1])\n image_paths = tensorflow.tile(\n tensorflow.expand_dims(image_path, dim=0), [tensorflow.shape(tiles)[0], 1])\n\n return tiles, labels, image_paths", "def images_square_grid(images, mode):\n # Get maximum size for square grid of images\n save_size = math.floor(np.sqrt(images.shape[0]))\n\n # Scale to 0-255\n images = (((images - images.min()) * 255) / (images.max() - images.min())).astype(np.uint8)\n\n # Put images in a square arrangement\n images_in_square = np.reshape(\n images[:save_size*save_size],\n (save_size, save_size, images.shape[1], images.shape[2], images.shape[3]))\n if mode == 'L':\n images_in_square = np.squeeze(images_in_square, 4)\n\n # Combine images to grid image\n new_im = Image.new(mode, (images.shape[1] * save_size, images.shape[2] * save_size))\n for col_i, col_images in enumerate(images_in_square):\n for image_i, image in enumerate(col_images):\n im = Image.fromarray(image, mode)\n new_im.paste(im, (col_i * images.shape[1], image_i * images.shape[2]))\n\n return new_im", "def test_bounded_tileset_image(self):\n\t\t# Create an 8x6 tileset image placeholder\n\t\tself.expected_tile_width = 8\n\t\tself.expected_tile_height = 6\n\t\tself.expected_rows = 5\n\t\tself.expected_cols = 4\n\n\t\tself.test_image = dummy_image(self.expected_width(), self.expected_height())\n\t\tself.test_image_grid = TextureGrid(ImageGrid(self.test_image, self.expected_rows, self.expected_cols))\n\n\t\t# Test creating a TilesetImage with specific dimensions\n\t\tself.tileset_image = TilesetImage(self.test_image, rows=self.expected_rows, cols=self.expected_cols)\n\n\t\tself.assert_tileset_image('Rows and columns not specified.')", "def save_tiles(tiles, prefix=\"\", directory=os.getcwd(), format=\"png\"):\n for tile in tiles:\n tile.save(\n filename=tile.generate_filename(\n prefix=prefix, directory=directory, format=format\n ),\n format=format,\n )\n return tuple(tiles)", "def _get_tiles(self, width: Numeric) -> List[Polygon]:\n min_x, min_y, max_x, max_y = self._get_rounded_bounding_box(self.geom, width)\n tiles = []\n\n for i in range(0, int((max_x - min_x) / width)):\n for j in range(0, int((max_y - min_y) / width)):\n tile = box(\n (i * width) + min_x,\n (j * width) + min_y,\n ((i + 1) * width) + min_x,\n ((j + 1) * width) + min_y,\n )\n\n if self.geom.intersects(tile):\n tiles.append(tile)\n\n return tiles", "def tile(self, (z, x, y)):\n output = self.cache.read((z, x, y))\n if output is None:\n # logger.info(_(\"TilesManager.tile calling sources.tile: \") )\n pass\n output = self.reader.tile(z, x, y)\n if output is None:\n return None\n # Blend layers\n if len(self._layers) > 0:\n logger.debug(_(\"Will blend %s layer(s)\") % len(self._layers))\n output = self._blend_layers(output, (z, x, y))\n # Apply filters\n for f in self._filters:\n image = f.process(self._tile_image(output))\n output = 
self._image_tile(image)\n # Save result to cache\n self.cache.save(output, (z, x, y))\n self.rendered += 1\n return output", "def get_images(self):\n return [self.get_image(i) for i in range(0, self.number_sprites - 1)]", "def image_range_to_tiles(start_ind, end_ind, display=False, save_summary=True, save_data=True, save_top_tiles=True):\n image_list = list()\n tile_summaries_dict = dict()\n for slide_name in range(start_ind, end_ind + 1):\n tile_summary = summary_and_tiles(slide_name, display, save_summary, save_data, save_top_tiles)\n image_list.append(slide_name)\n tile_summaries_dict[slide_name] = tile_summary\n return image_list, tile_summaries_dict", "def merge_tiles(tiles, funcs=None):\n return np.vstack([\n np.hstack([\n reduce(lambda acc, f: f(acc), funcs, x) if funcs else x\n for x in row\n ])\n for row in tiles\n ])", "def get_stacked_maps_image(self) -> PIL.Image:\n num_tokens = len(self.token_ids)\n if num_tokens == 0:\n return None\n\n latents_height = self.latents_shape[0]\n latents_width = self.latents_shape[1]\n\n merged = None\n\n for key, maps in self.collated_maps.items():\n # maps has shape [(H*W), N] for N tokens\n # but we want [N, H, W]\n this_scale_factor = math.sqrt(maps.shape[0] / (latents_width * latents_height))\n this_maps_height = int(float(latents_height) * this_scale_factor)\n this_maps_width = int(float(latents_width) * this_scale_factor)\n # and we need to do some dimension juggling\n maps = torch.reshape(\n torch.swapdims(maps, 0, 1),\n [num_tokens, this_maps_height, this_maps_width],\n )\n\n # scale to output size if necessary\n if this_scale_factor != 1:\n maps = tv_resize(maps, [latents_height, latents_width], InterpolationMode.BICUBIC)\n\n # normalize\n maps_min = torch.min(maps)\n maps_range = torch.max(maps) - maps_min\n # print(f\"map {key} size {[this_maps_width, this_maps_height]} range {[maps_min, maps_min + maps_range]}\")\n maps_normalized = (maps - maps_min) / maps_range\n # expand to (-0.1, 1.1) and clamp\n maps_normalized_expanded = maps_normalized * 1.1 - 0.05\n maps_normalized_expanded_clamped = torch.clamp(maps_normalized_expanded, 0, 1)\n\n # merge together, producing a vertical stack\n maps_stacked = torch.reshape(\n maps_normalized_expanded_clamped,\n [num_tokens * latents_height, latents_width],\n )\n\n if merged is None:\n merged = maps_stacked\n else:\n # screen blend\n merged = 1 - (1 - maps_stacked) * (1 - merged)\n\n if merged is None:\n return None\n\n merged_bytes = merged.mul(0xFF).byte()\n return PIL.Image.fromarray(merged_bytes.numpy(), mode=\"L\")", "def image_list_to_tiles(image_list, display=False, save_summary=True, save_data=True, save_top_tiles=True):\n tile_summaries_dict = dict()\n for slide_name in image_list:\n tile_summary = summary_and_tiles(slide_name, display, save_summary, save_data, save_top_tiles)\n tile_summaries_dict[slide_name] = tile_summary\n return image_list, tile_summaries_dict", "def merge_images(filenames, outfile, vgap=20):\n images = [Image.open(filename) for filename in filenames]\n\n widths = [image.size[0] for image in images]\n heights = [image.size[1] for image in images]\n\n result_width = max(widths)\n result_height = sum(heights) + len(images) * vgap\n\n result = Image.new('RGB', (result_width, result_height), (255, 255, 255))\n y = 0\n for image in images:\n result.paste(im=image, box=(0, y))\n y += image.size[1] + vgap\n\n\n result.save(outfile)", "def get_tiles(self):\n\n tiles = []\n for x in range(self.position[0],\n self.position[0] + CAR_LENGTH if self.is_horizontal else 
self.position[0] + CAR_WIDTH):\n for y in range(self.position[1],\n self.position[1] + CAR_WIDTH if self.is_horizontal else self.position[1] + CAR_LENGTH):\n tiles.append((x, y))\n\n return tiles", "def make(self) -> None:\n\n # arbitrarily selecting the first image from the list, index 0\n with Image.open(self.image_list[0]) as first_frame_image_in_list:\n\n # Find the width and height of the first image of the list.\n # Assuming all the images have same size.\n frame_image_width, frame_image_height = first_frame_image_in_list.size\n\n # scale is the ratio of collage_image_width and product of\n # images_per_row_in_collage with frame_image_width.\n\n # The scale will always lie between 0 and 1, which implies that\n # the images are always going to get downsized.\n scale = (self.collage_image_width) / (\n self.images_per_row_in_collage * frame_image_width\n )\n\n # Calculating the scaled height and width for the frame image.\n scaled_frame_image_width = ceil(frame_image_width * scale)\n scaled_frame_image_height = ceil(frame_image_height * scale)\n\n # Divide the number of images by images_per_row_in_collage. The later\n # was calculated by taking the square root of total number of images.\n number_of_rows = ceil(self.number_of_images / self.images_per_row_in_collage)\n\n # Multiplying the height of one downsized image with number of rows.\n # Height of 1 downsized image is product of scale and frame_image_height\n # Total height is number of rows times the height of one downsized image.\n self.collage_image_height = ceil(scale * frame_image_height * number_of_rows)\n\n # Create an image of passed collage_image_width and calculated collage_image_height.\n # The downsized images will be pasted on this new base image.\n # The image is 0,0,0 RGB(black).\n collage_image = Image.new(\n \"RGB\", (self.collage_image_width, self.collage_image_height)\n )\n\n # keep track of the x and y coordinates of the resized frame images\n i, j = (0, 0)\n\n # iterate the frames and paste them on their position on the collage_image\n for count, frame_path in enumerate(self.image_list):\n\n # Set the x coordinate to zero if we are on the first column\n # If self.images_per_row_in_collage is 4\n # then 0,4,8 and so on should have their x coordinate as 0\n if (count % self.images_per_row_in_collage) == 0:\n i = 0\n\n # open the frame image, must open it to resize it using the thumbnail method\n frame = Image.open(frame_path)\n\n # scale the opened frame images\n frame.thumbnail(\n (scaled_frame_image_width, scaled_frame_image_height), Image.ANTIALIAS\n )\n\n # set the value of x to that of i's value.\n # i is set to 0 if we are on the first column.\n x = i\n\n # It ensures that y coordinate stays the same for any given row.\n # The floor of a real number is the largest integer that is less\n # than or equal to the number. 
floor division is used because of\n # the zero based indexing, the floor of the division stays same\n # for an entier row as the decimal values are negled by the floor.\n # for the first row the result of floor division is always zero and\n # the product of 0 with scaled_frame_image_height is also zero, they\n # y coordinate for the first row is 0.\n # For the second row the result of floor division is one and the prodcut\n # with scaled_frame_image_height ensures that the y coordinate is\n # scaled_frame_image_height below the first row.\n y = (j // self.images_per_row_in_collage) * scaled_frame_image_height\n\n # paste the frame image on the newly created base image(base image is black)\n collage_image.paste(frame, (x, y))\n frame.close()\n\n # increase the x coordinate by scaled_frame_image_width\n # to get the x coordinate of the next frame. unless the next image\n # will be on the very first column this will be the x coordinate.\n i = i + scaled_frame_image_width\n\n # increase the value of j by 1, this is to calculate the y coordinate of\n # next image. The increased number will be floor divided by images_per_row_in_collage\n # therefore the y coordinate stays the same for any given row.\n j += 1\n\n # save the base image with all the scaled frame images embeded on it.\n collage_image.save(self.output_path)\n collage_image.close()", "def sample_tiles_unstacked(self, fc, image_spec, export_radius, tags=None):\n datasources = self._get_datasources_by_tag(tags=tags)\n image_spec = ExportManager._convert_to_image_spec(image_spec)\n ExportManager._populate_cache(image_spec, datasources)\n\n output_bands = []\n for i, (ds_name, ds_config) in enumerate(datasources.items()):\n\n ds_config = dict(ds_config)\n ds_config['tag'] = list(ds_config['tag'])\n small_config = {ds_name: ds_config}\n _image_spec, small_output_bands = ExportManager(small_config)._get_image_spec_helper(image_spec, tags)\n fc = add_imagery(fc, _image_spec, output_size=export_radius, add_latlon=(i == 0))\n\n if i != 0:\n small_output_bands.remove('LAT')\n small_output_bands.remove('LON')\n\n output_bands.extend(small_output_bands)\n\n return fc, output_bands", "def __init__(self, width, height):\n self.w = width\n self.h = height\n self.cleanTiles = []\n self.tiles = [[False] * width for i in range(height)]\n self.cleaned = 0", "def open_images_in(directory):\n\n files = [\n filename\n for filename in os.listdir(directory)\n if \"_\" in filename and not filename.startswith(\"joined\")\n ]\n tiles = []\n if len(files) > 0:\n i = 0\n for file in files:\n pos = get_image_column_row(file)\n im = Image.open(os.path.join(directory, file))\n\n position_xy = [0, 0]\n count = 0\n for a, b in zip(pos, im.size):\n position_xy[count] = a * b\n count = count + 1\n tiles.append(\n Tile(\n image=im,\n position=pos,\n number=i + 1,\n coords=position_xy,\n filename=file,\n )\n )\n i = i + 1\n return tiles", "def load(self, image_loader):\n self._image_loader = image_loader\n for tile_set in self.tile_sets:\n # do images first, because tiles could reference it\n for img in tile_set.images:\n if img.source:\n self._load_image_from_source(tile_set, img)\n else:\n tile_set.indexed_images[img.id] = self._load_image(img)\n # tiles\n for tile in tile_set.tiles:\n for img in tile.images:\n if not img.content and not img.source:\n # only image id set\n indexed_img = tile_set.indexed_images[img.id]\n self.indexed_tiles[int(tile_set.firstgid) + int(tile.id)] = (0, 0, indexed_img)\n else:\n if img.source:\n self._load_image_from_source(tile_set, img)\n 
else:\n indexed_img = self._load_image(img)\n self.indexed_tiles[int(tile_set.firstgid) + int(tile.id)] = (0, 0, indexed_img)", "def load(self, image_loader):\n self._image_loader = image_loader\n for tile_set in self.tile_sets:\n # do images first, because tiles could reference it\n for img in tile_set.images:\n if img.source:\n self._load_image_from_source(tile_set, img)\n else:\n tile_set.indexed_images[img.id] = self._load_image(img)\n # tiles\n for tile in tile_set.tiles:\n for img in tile.images:\n if not img.content and not img.source:\n # only image id set\n indexed_img = tile_set.indexed_images[img.id]\n self.indexed_tiles[int(tile_set.firstgid) + int(tile.id)] = (0, 0, indexed_img)\n else:\n if img.source:\n self._load_image_from_source(tile_set, img)\n else:\n indexed_img = self._load_image(img)\n self.indexed_tiles[int(tile_set.firstgid) + int(tile.id)] = (0, 0, indexed_img)", "def __init__(self, tiles = []):\n self.tiles = tiles", "def _tile_image(self, data):\n image = Image.open(StringIO(data))\n return image.convert('RGBA')", "def _get_tiles_and_coords(\n self, tensor: torch.Tensor\n ) -> Tuple[torch.Tensor, List[List[int]], List[List[List[int]]]]:\n assert tensor.dim() == 4 and tensor.shape[0] == 1\n\n y_coords, y_overlaps = self._calc_tile_coords(\n tensor.shape[2], self._tile_size[0], self._tile_overlap[0]\n )\n x_coords, x_overlaps = self._calc_tile_coords(\n tensor.shape[3], self._tile_size[1], self._tile_overlap[1]\n )\n tile_coords = torch.jit.annotate(List[Tuple[int, int, int, int]], [])\n [\n [\n tile_coords.append(\n (y, y + self._tile_size[0], x, x + self._tile_size[1])\n )\n for x in x_coords\n ]\n for y in y_coords\n ]\n tiles = torch.cat([tensor[..., c[0] : c[1], c[2] : c[3]] for c in tile_coords])\n return tiles, [y_coords, x_coords], [y_overlaps, x_overlaps]", "def generate_base_tiles(self):\n\n if not self.options.quiet:\n print(\"Generating Base Tiles:\")\n\n if self.options.verbose:\n print('')\n print(\"Tiles generated from the max zoom level:\")\n print(\"----------------------------------------\")\n print('')\n\n # Set the bounds\n tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]\n\n ds = self.warped_input_dataset\n tilebands = self.dataBandsCount + 1\n querysize = self.querysize\n\n if self.options.verbose:\n print(\"dataBandsCount: \", self.dataBandsCount)\n print(\"tilebands: \", tilebands)\n\n tcount = (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))\n ti = 0\n\n tile_details = []\n\n tz = self.tmaxz\n for ty in range(tmaxy, tminy-1, -1):\n for tx in range(tminx, tmaxx+1):\n\n ti += 1\n ytile = GDAL2Tiles.getYtile(ty, tz, self.options)\n tilefilename = os.path.join(\n self.output_folder, str(tz), '{0:04d}'.format(tx) + \"_\" + '{0:04d}'.format(ytile) + \".\" + self.tileext)\n if self.options.verbose:\n print(ti, '/', tcount, tilefilename)\n\n if self.options.resume and os.path.exists(tilefilename):\n if self.options.verbose:\n print(\"Tile generation skipped because of --resume\")\n continue\n\n # Create directories for the tile\n if not os.path.exists(os.path.dirname(tilefilename)):\n os.makedirs(os.path.dirname(tilefilename))\n\n if self.options.profile == 'mercator':\n # Tile bounds in EPSG:3857\n b = self.mercator.TileBounds(tx, ty, tz)\n elif self.options.profile == 'geodetic':\n b = self.geodetic.TileBounds(tx, ty, tz)\n\n # Don't scale up by nearest neighbour, better change the querysize\n # to the native resolution (and return smaller query tile) for scaling\n\n if self.options.profile in ('mercator', 'geodetic'):\n rb, wb = 
self.geo_query(ds, b[0], b[3], b[2], b[1])\n\n # Pixel size in the raster covering query geo extent\n nativesize = wb[0] + wb[2]\n if self.options.verbose:\n print(\"\\tNative Extent (querysize\", nativesize, \"): \", rb, wb)\n\n # Tile bounds in raster coordinates for ReadRaster query\n rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1], querysize=querysize)\n\n rx, ry, rxsize, rysize = rb\n wx, wy, wxsize, wysize = wb\n\n else: # 'raster' profile:\n\n tsize = int(self.tsize[tz]) # tilesize in raster coordinates for actual zoom\n xsize = self.warped_input_dataset.RasterXSize # size of the raster in pixels\n ysize = self.warped_input_dataset.RasterYSize\n if tz >= self.nativezoom:\n querysize = self.tilesize\n\n rx = (tx) * tsize\n rxsize = 0\n if tx == tmaxx:\n rxsize = xsize % tsize\n if rxsize == 0:\n rxsize = tsize\n\n rysize = 0\n if ty == tmaxy:\n rysize = ysize % tsize\n if rysize == 0:\n rysize = tsize\n ry = ysize - (ty * tsize) - rysize\n\n wx, wy = 0, 0\n wxsize = int(rxsize/float(tsize) * self.tilesize)\n wysize = int(rysize/float(tsize) * self.tilesize)\n if wysize != self.tilesize:\n wy = self.tilesize - wysize\n\n # Read the source raster if anything is going inside the tile as per the computed\n # geo_query\n tile_details.append(\n TileDetail(\n tx=tx, ty=ytile, tz=tz, rx=rx, ry=ry, rxsize=rxsize, rysize=rysize, wx=wx,\n wy=wy, wxsize=wxsize, wysize=wysize, querysize=querysize,\n )\n )\n\n conf = TileJobInfo(\n src_file=self.tmp_vrt_filename,\n nb_data_bands=self.dataBandsCount,\n output_file_path=self.output_folder,\n tile_extension=self.tileext,\n tile_driver=self.tiledriver,\n tile_size=self.tilesize,\n kml=self.kml,\n tminmax=self.tminmax,\n tminz=self.tminz,\n tmaxz=self.tmaxz,\n in_srs_wkt=self.in_srs_wkt,\n out_geo_trans=self.out_gt,\n ominy=self.ominy,\n is_epsg_4326=self.isepsg4326,\n options=self.options,\n )\n\n return conf, tile_details", "def test_combine():\n # Create 4 square arrays:\n # 0 1 2 3\n # -----------\n # 00 11 22 33\n # 00 11 22 33\n tiles = [np.array(_square(i)) for i in range(4)]\n\n with pytest.raises(ValueError):\n _combine_tiles(tiles[0], tiles[1], tiles[2]) # Too few values.\n\n with pytest.raises(ValueError):\n _combine_tiles(tiles[0], None, None, None, None) # Too many values.\n\n # Combine them the 4 major ways:\n\n # case1: corner\n # 0X\n # XX\n case1 = _combine_tiles(tiles[0], None, None, None)\n assert case1.shape == (2, 2)\n assert (case1 == tiles[0]).all()\n\n # case2: bottom edge\n # 01\n # XX\n case2 = _combine_tiles(tiles[0], tiles[1], None, None)\n assert case2.shape == (2, 4)\n assert (case2[0:2, 0:2] == tiles[0]).all()\n assert (case2[0:2, 3:5] == tiles[1]).all()\n\n # case3: right edge\n # 0X\n # 2X\n case3 = _combine_tiles(tiles[0], None, tiles[2], None)\n assert case3.shape == (4, 2)\n assert (case3[0:2, 0:2] == tiles[0]).all()\n assert (case3[3:5, 0:2] == tiles[2]).all()\n\n # case4: interior\n # 01\n # 23\n case4 = _combine_tiles(tiles[0], tiles[1], tiles[2], tiles[3])\n assert case4.shape == (4, 4)\n assert (case4[0:2, 0:2] == tiles[0]).all()\n assert (case4[0:2, 3:5] == tiles[1]).all()\n assert (case4[3:5, 0:2] == tiles[2]).all()\n assert (case4[3:5, 3:5] == tiles[3]).all()", "def create_tiles(self, zoom):\n # Compute the tile x-y-z index range for the rasterlayer for this zoomlevel\n bbox = self.rasterlayer.extent()\n indexrange = tiler.tile_index_range(bbox, zoom)\n\n # Compute scale of tiles for this zoomlevel\n tilescale = tiler.tile_scale(zoom)\n\n # Count the number of tiles that are required to cover the raster at 
this zoomlevel\n nr_of_tiles = (indexrange[2] - indexrange[0] + 1) * (indexrange[3] - indexrange[1] + 1)\n\n # Create destination raster file\n self.log('Snapping dataset to zoom level {0}'.format(zoom))\n\n bounds = tiler.tile_bounds(indexrange[0], indexrange[1], zoom)\n sizex = (indexrange[2] - indexrange[0] + 1) * self.tilesize\n sizey = (indexrange[3] - indexrange[1] + 1) * self.tilesize\n dest_file = os.path.join(self.tmpdir, 'djangowarpedraster' + str(zoom) + '.tif')\n\n snapped_dataset = self.dataset.warp({\n 'name': dest_file,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'width': sizex,\n 'height': sizey,\n })\n\n self.log('Creating {0} tiles for zoom {1}.'.format(nr_of_tiles, zoom))\n\n counter = 0\n for tilex in range(indexrange[0], indexrange[2] + 1):\n for tiley in range(indexrange[1], indexrange[3] + 1):\n # Log progress\n counter += 1\n if counter % 250 == 0:\n self.log('{0} tiles created at zoom {1}'.format(counter, zoom))\n\n # Calculate raster tile origin\n bounds = tiler.tile_bounds(tilex, tiley, zoom)\n\n # Construct band data arrays\n pixeloffset = (\n (tilex - indexrange[0]) * self.tilesize,\n (tiley - indexrange[1]) * self.tilesize\n )\n\n band_data = [\n {\n 'data': band.data(offset=pixeloffset, size=(self.tilesize, self.tilesize)),\n 'nodata_value': band.nodata_value\n } for band in snapped_dataset.bands\n ]\n\n # Add tile data to histogram\n if zoom == self.max_zoom:\n self.push_histogram(band_data)\n\n # Warp source raster into this tile (in memory)\n dest = GDALRaster({\n 'width': self.tilesize,\n 'height': self.tilesize,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'srid': WEB_MERCATOR_SRID,\n 'datatype': snapped_dataset.bands[0].datatype(),\n 'bands': band_data,\n })\n\n # Store tile\n RasterTile.objects.create(\n rast=dest,\n rasterlayer=self.rasterlayer,\n tilex=tilex,\n tiley=tiley,\n tilez=zoom\n )\n\n # Store histogram data\n if zoom == self.max_zoom:\n bandmetas = RasterLayerBandMetadata.objects.filter(rasterlayer=self.rasterlayer)\n for bandmeta in bandmetas:\n bandmeta.hist_values = self.hist_values[bandmeta.band].tolist()\n bandmeta.save()\n\n # Remove snapped dataset\n self.log('Removing snapped dataset.', zoom=zoom)\n snapped_dataset = None\n os.remove(dest_file)", "def merge_images(_input_image_paths : list[str], _output_image_dir : str, \\\n _constraint_amount : int, _constraint_type : Direction, _fill_direction : Direction) -> None:\n #_num_rows : int, ) -> None:\n #Find the largest x/y sizes of all input images to ensure the resulting merged image\n # will conform to some amount of regularity\n image_x_sizes = []\n image_y_sizes = []\n for image in _input_image_paths:\n with Image.open(image) as image_object:\n image_x_sizes.append(image_object.size[0])\n image_y_sizes.append(image_object.size[1])\n largest_x = max(image_x_sizes)\n largest_y = max(image_y_sizes)\n\n #Since the user fixed the number of images and rows, we can decide the number of columns\n if _constraint_type == Direction.ROW:\n num_rows = _constraint_amount\n num_columns = math.ceil(len(_input_image_paths) / _constraint_amount)\n\n #Since the user fixed the number of images and columns, we can decide the number of rows\n elif _constraint_type == Direction.COLUMN:\n num_columns = _constraint_amount\n num_rows = math.ceil(len(_input_image_paths) / _constraint_amount)\n\n else:\n sys.exit(\"Merge dimension constraint error\")\n\n #Generate the coordinates for each of the images, based on how many rows/coulmns\n # the user 
specified, and whether the images are being placed filling row by row\n # or filling column by column\n coordinates = generate_image_coordinates(_fill_direction, len(_input_image_paths), \\\n num_rows, num_columns, largest_x, largest_y)\n\n #Set up the new image, whose dimensions accommodate all input images (and maybe a\n # bit of extra blank space, depending on how evenly the images fit)\n with Image.new(\"RGBA\", (largest_x * num_columns, largest_y * num_rows), \\\n WHITE_COLOUR_ALPHA) as new_canvas:\n\n #Paste all of the input images to the new canvas\n image_count = 0\n for image in _input_image_paths:\n with Image.open(image) as image_object:\n new_canvas.paste(image_object, coordinates[image_count])\n image_count += 1\n\n #Once all the images have been pasted in, then save the image.\n new_canvas.save(f\"{_output_image_dir}({num_rows}x{num_columns})_\"\n f\"{direction_to_string(_fill_direction)}-merged.PNG\")", "def test_generate_tiles_2(self):\n tile_list = utils.generate_tiles()\n self.assertIsInstance(tile_list[0], Tile)" ]
[ "0.6996957", "0.6695184", "0.63886476", "0.62590015", "0.6253077", "0.6211737", "0.62029433", "0.60706353", "0.6000102", "0.596734", "0.5926167", "0.5918759", "0.5894937", "0.5867754", "0.5861465", "0.5857397", "0.5848029", "0.5744577", "0.5702461", "0.5656095", "0.56503665", "0.56347495", "0.56256145", "0.5623565", "0.56085813", "0.5607058", "0.55955684", "0.55894506", "0.55823416", "0.55605465", "0.55140835", "0.5512697", "0.54932743", "0.54880184", "0.54879063", "0.5486487", "0.54837734", "0.5483272", "0.5473186", "0.54475546", "0.5418708", "0.541404", "0.541404", "0.54056823", "0.5402589", "0.53906244", "0.53901494", "0.5385121", "0.53775984", "0.5374606", "0.53678894", "0.5363457", "0.53627014", "0.53545946", "0.5353954", "0.53537697", "0.5353748", "0.53423595", "0.5332115", "0.5325555", "0.5325555", "0.5286386", "0.52813876", "0.52671903", "0.5260183", "0.52570057", "0.52552867", "0.5244443", "0.52352816", "0.52315044", "0.52284765", "0.5188243", "0.51823866", "0.51817197", "0.51797134", "0.51783466", "0.5160765", "0.5159334", "0.51560855", "0.51513666", "0.51456606", "0.5139218", "0.51346165", "0.5134444", "0.51233405", "0.5121243", "0.51168036", "0.5113213", "0.51110595", "0.5102339", "0.5102041", "0.5102041", "0.50891984", "0.5087279", "0.50838745", "0.5083458", "0.50708693", "0.50680614", "0.5064438", "0.5047611" ]
0.7703681
0
Basic sanity checks prior to performing a split.
def validate_image(image, number_tiles):
    TILE_LIMIT = 99 * 99

    try:
        number_tiles = int(number_tiles)
    except BaseException:
        raise ValueError("number_tiles could not be cast to integer.")

    if number_tiles > TILE_LIMIT or number_tiles < 2:
        raise ValueError(
            "Number of tiles must be between 2 and {} (you \
asked for {}).".format(
                TILE_LIMIT, number_tiles
            )
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_splitSanity(self):\n # Whiteboxing\n self.assertRaises(ValueError, irc.split, \"foo\", -1)\n self.assertRaises(ValueError, irc.split, \"foo\", 0)\n self.assertEqual([], irc.split(\"\", 1))\n self.assertEqual([], irc.split(\"\"))", "def test_splitValidatesLength(self):\n self.assertRaises(ValueError, irc.split, \"foo\", 0)\n self.assertRaises(ValueError, irc.split, \"foo\", -1)", "def test_split_string_empty_string(self):\n self.assertFalse(split_string(\"\"))", "def canSplit(self):\n return False", "def test_split_string_wrong_input_data(self):\n self.assertEqual(\"Wrong input data\", split_string(13))", "def split(self, splits, catchall=False):\r\n raise NotImplementedError()", "def __check_split_object_validity(self, split):\n\n if split is None:\n raise Pdf4meClientException('The split parameter cannot be None.')\n elif split.document is None or split.document.doc_data is None:\n raise Pdf4meClientException('The split document cannot be None.')\n elif split.split_action is None:\n raise Pdf4meClientException('The split_action cannot be None.')\n elif split.split_action.split_after_page is None or split.split_action.split_after_page == 0:\n raise Pdf4meClientException('The split_after_page of split_action cannot be None or zero.'\n 'The first page of a PDF corresponds to page number one.')", "def testSplit(self):\n\n s = StrObject(u\"first second\")\n result = s.call(u\"split\", [StrObject(u\" \")])\n pieces = [obj._s for obj in unwrapList(result)]\n self.assertEqual(pieces, [u\"first\", u\"second\"])", "def test_tt_split(self):\n\n bad_arg1 = 5\n bad_arg2 = \"It's a string!\"\n ld = Lambdata(self.df)\n\n ld.tt_split(bad_arg1)\n ld.tt_split(bad_arg2)\n self.assertRaises(ValueError)", "def test_separators_only():\n assert my_splitter(\",ad,\", \"ad\") == [\",\", \",\"]", "def test_split_string(self):\n self.assertEqual(('1-4', 14), split_string('1-4/14'))", "def test_split():\n nmrs = NIFTI_MRS(test_data_split)\n\n # Error testing\n # Wrong dim tag\n with pytest.raises(ValueError) as exc_info:\n nmrs_tools.split(nmrs, 'DIM_EDIT', 1)\n\n assert exc_info.type is ValueError\n assert exc_info.value.args[0] == \"DIM_EDIT not found as dimension tag.\"\\\n \" This data contains ['DIM_COIL', 'DIM_DYN', None].\"\n\n # Wrong dim index (no dim in this data)\n with pytest.raises(ValueError) as exc_info:\n nmrs_tools.split(nmrs, 6, 1)\n\n assert exc_info.type is ValueError\n assert exc_info.value.args[0] == \"Dimension must be one of 4, 5, or 6 (or DIM_TAG string).\"\\\n \" This data has 6 dimensions,\"\\\n \" i.e. a maximum dimension value of 5.\"\n\n # Wrong dim index (too low)\n with pytest.raises(ValueError) as exc_info:\n nmrs_tools.split(nmrs, 3, 1)\n\n assert exc_info.type is ValueError\n assert exc_info.value.args[0] == \"Dimension must be one of 4, 5, or 6 (or DIM_TAG string).\"\\\n \" This data has 6 dimensions,\"\\\n \" i.e. 
a maximum dimension value of 5.\"\n\n # Wrong dim index type\n with pytest.raises(TypeError) as exc_info:\n nmrs_tools.split(nmrs, [3, ], 1)\n\n assert exc_info.type is TypeError\n assert exc_info.value.args[0] == \"Dimension must be an int (4, 5, or 6) or string (DIM_TAG string).\"\n\n # Single index - out of range low\n with pytest.raises(ValueError) as exc_info:\n nmrs_tools.split(nmrs, 'DIM_DYN', -1)\n\n assert exc_info.type is ValueError\n assert exc_info.value.args[0] == \"index_or_indicies must be between 0 and N-1,\"\\\n \" where N is the size of the specified dimension (16).\"\n\n # Single index - out of range high\n with pytest.raises(ValueError) as exc_info:\n nmrs_tools.split(nmrs, 'DIM_DYN', 64)\n\n assert exc_info.type is ValueError\n assert exc_info.value.args[0] == \"index_or_indicies must be between 0 and N-1,\"\\\n \" where N is the size of the specified dimension (16).\"\n\n # List of indicies - out of range low\n with pytest.raises(ValueError) as exc_info:\n nmrs_tools.split(nmrs, 'DIM_DYN', [-1, 0, 1])\n\n assert exc_info.type is ValueError\n assert exc_info.value.args[0] == \"index_or_indicies must have elements between 0 and N,\"\\\n \" where N is the size of the specified dimension (16).\"\n\n # List of indicies - out of range high\n with pytest.raises(ValueError) as exc_info:\n nmrs_tools.split(nmrs, 'DIM_DYN', [0, 65])\n\n assert exc_info.type is ValueError\n assert exc_info.value.args[0] == \"index_or_indicies must have elements between 0 and N,\"\\\n \" where N is the size of the specified dimension (16).\"\n\n # List of indicies - wrong type\n with pytest.raises(TypeError) as exc_info:\n nmrs_tools.split(nmrs, 'DIM_DYN', '1')\n\n assert exc_info.type is TypeError\n assert exc_info.value.args[0] == \"index_or_indicies must be single index or list of indicies\"\n\n # Functionality testing\n\n out_1, out_2 = nmrs_tools.split(nmrs, 'DIM_DYN', 7)\n assert out_1[:].shape == (1, 1, 1, 4096, 4, 8)\n assert out_2[:].shape == (1, 1, 1, 4096, 4, 8)\n assert np.allclose(out_1[:], nmrs[:, :, :, :, :, 0:8])\n assert np.allclose(out_2[:], nmrs[:, :, :, :, :, 8:])\n assert out_1.hdr_ext == nmrs.hdr_ext\n assert out_1.hdr_ext == nmrs.hdr_ext\n assert np.allclose(out_1.getAffine('voxel', 'world'), nmrs.getAffine('voxel', 'world'))\n assert np.allclose(out_2.getAffine('voxel', 'world'), nmrs.getAffine('voxel', 'world'))\n\n out_1, out_2 = nmrs_tools.split(nmrs, 'DIM_DYN', [0, 4, 15])\n assert out_1[:].shape == (1, 1, 1, 4096, 4, 13)\n assert out_2[:].shape == (1, 1, 1, 4096, 4, 3)\n test_list = np.arange(0, 16)\n test_list = np.delete(test_list, [0, 4, 15])\n assert np.allclose(out_1[:], nmrs[:][:, :, :, :, :, test_list])\n assert np.allclose(out_2[:], nmrs[:][:, :, :, :, :, [0, 4, 15]])\n\n # Split some synthetic data with header information\n nhdr_1 = gen_nifti_mrs(\n np.ones((1, 1, 1, 10, 4), dtype=complex),\n 1 / 1000,\n 100.0,\n '1H',\n dim_tags=['DIM_DYN', None, None])\n\n nhdr_1.set_dim_tag(\n 'DIM_DYN',\n 'DIM_DYN',\n header={'RepetitionTime': [1, 2, 3, 4]})\n\n out_1, out_2 = nmrs_tools.split(nhdr_1, 'DIM_DYN', 1)\n assert out_1.shape == (1, 1, 1, 10, 2)\n assert out_1.hdr_ext['dim_5'] == 'DIM_DYN'\n assert out_1.hdr_ext['dim_5_header'] == {'RepetitionTime': [1, 2]}\n assert out_2.hdr_ext['dim_5_header'] == {'RepetitionTime': [3, 4]}", "def test_splitany(self):\n\n # Test valid empty inputs\n\n self.assertEqual(splitany(None, None), [])\n\n self.assertEqual(splitany(b'', b''), [b''])\n self.assertEqual(splitany(b'', ''), [b''])\n\n self.assertEqual(splitany('', b''), 
[''])\n self.assertEqual(splitany('', ''), [''])\n\n # Test invalid strings to split\n\n for invalid in ({}, set(), (x for x in 'abc')):\n with self.assertRaises(TypeError):\n splitany(invalid, u'')\n with self.assertRaises(TypeError):\n splitany(invalid, b'')\n\n # Test invalid separators to split on\n\n for invalid in ({}, set(), [u'', b'', {}], (u'', b'', set())):\n with self.assertRaises(TypeError):\n splitany(u'', invalid)\n with self.assertRaises(TypeError):\n splitany(b'', invalid)\n\n # Test single inputs\n\n # No matches with unicode and binary combinations\n self.assertEqual(splitany(u'abc'), u'abc'.split())\n self.assertEqual(splitany(b'abc'), b'abc'.split())\n\n self.assertEqual(splitany(u'abc', u'x'), u'abc'.split(u'x'))\n self.assertEqual(splitany(b'abc', u'x'), b'abc'.split(b'x'))\n self.assertEqual(splitany(u'abc', b'x'), u'abc'.split(u'x'))\n self.assertEqual(splitany(b'abc', b'x'), b'abc'.split(b'x'))\n\n # One initial match with unicode and binary combinations\n self.assertEqual(splitany(u' abc'), u' abc'.split())\n self.assertEqual(splitany(b' abc'), b' abc'.split())\n\n self.assertEqual(splitany(u'xyz', u'x'), u'xyz'.split(u'x'))\n self.assertEqual(splitany(b'xyz', u'x'), b'xyz'.split(b'x'))\n self.assertEqual(splitany(u'xyz', b'x'), u'xyz'.split(u'x'))\n self.assertEqual(splitany(b'xyz', b'x'), b'xyz'.split(b'x'))\n self.assertEqual(splitany(u'xyz', b'x', 0), u'xyz'.split(u'x', 0))\n self.assertEqual(splitany(b'xyz', u'x', 1), b'xyz'.split(b'x', 1))\n self.assertEqual(splitany(u'xyz', u'x', 2), u'xyz'.split(u'x', 2))\n self.assertEqual(splitany(b'xyz', b'x', 3), b'xyz'.split(b'x', 3))\n\n # One complete match with unicode and binary combinations\n self.assertEqual(splitany(u' '), u' '.split())\n self.assertEqual(splitany(b' '), b' '.split())\n\n self.assertEqual(splitany(u'xyz', [u'xyz']), u'xyz'.split(u'xyz'))\n self.assertEqual(splitany(b'xyz', [u'xyz']), b'xyz'.split(b'xyz'))\n self.assertEqual(splitany(u'xyz', [b'xyz']), u'xyz'.split(u'xyz'))\n self.assertEqual(splitany(b'xyz', [b'xyz']), b'xyz'.split(b'xyz'))\n self.assertEqual(splitany(u'xyz', [b'xyz'], 0), u'xyz'.split(u'xyz', 0))\n self.assertEqual(splitany(b'xyz', [u'xyz'], 1), b'xyz'.split(b'xyz', 1))\n self.assertEqual(splitany(u'xyz', [u'xyz'], 2), u'xyz'.split(u'xyz', 2))\n self.assertEqual(splitany(b'xyz', [b'xyz'], 3), b'xyz'.split(b'xyz', 3))\n\n # Two internal matches with unicode and binary combinations\n self.assertEqual(splitany(u'abc def ghi'), u'abc def ghi'.split())\n self.assertEqual(splitany(b'abc def ghi'), b'abc def ghi'.split())\n\n self.assertEqual(splitany(u'xyzxyz', u'y'), u'xyzxyz'.split(u'y'))\n self.assertEqual(splitany(b'xyzxyz', u'y'), b'xyzxyz'.split(b'y'))\n self.assertEqual(splitany(u'xyzxyz', b'y'), u'xyzxyz'.split(u'y'))\n self.assertEqual(splitany(b'xyzxyz', b'y'), b'xyzxyz'.split(b'y'))\n self.assertEqual(splitany(u'xyzxyz', b'y', 0), u'xyzxyz'.split(u'y', 0))\n self.assertEqual(splitany(b'xyzxyz', u'y', 1), b'xyzxyz'.split(b'y', 1))\n self.assertEqual(splitany(u'xyzxyz', u'y', 2), u'xyzxyz'.split(u'y', 2))\n self.assertEqual(splitany(b'xyzxyz', b'y', 3), b'xyzxyz'.split(b'y', 3))\n\n # Two internal multi-char matches with unicode and binary combinations\n self.assertEqual(splitany(u'abc\\n\\tdef'), u'abc\\n\\tdef'.split())\n self.assertEqual(splitany(b'abc\\n\\tdef'), b'abc\\n\\tdef'.split())\n\n self.assertEqual(splitany(u'xyzxyz', [u'yz']), u'xyzxyz'.split(u'yz'))\n self.assertEqual(splitany(b'xyzxyz', [u'yz']), b'xyzxyz'.split(b'yz'))\n 
self.assertEqual(splitany(u'xyzxyz', [b'yz']), u'xyzxyz'.split(u'yz'))\n self.assertEqual(splitany(b'xyzxyz', [b'yz']), b'xyzxyz'.split(b'yz'))\n self.assertEqual(splitany(u'xyzxyz', [b'yz'], 0), u'xyzxyz'.split(u'yz', 0))\n self.assertEqual(splitany(b'xyzxyz', [u'yz'], 1), b'xyzxyz'.split(b'yz', 1))\n self.assertEqual(splitany(u'xyzxyz', [u'yz'], 2), u'xyzxyz'.split(u'yz', 2))\n self.assertEqual(splitany(b'xyzxyz', [b'yz'], 3), b'xyzxyz'.split(b'yz', 3))\n\n # Three internal and trailing matches with unicode and binary combinations\n self.assertEqual(splitany(u'abc def\\tdef\\n'), u'abc def\\tdef\\n'.split())\n self.assertEqual(splitany(b'abc def\\tdef\\n'), b'abc def\\tdef\\n'.split())\n\n self.assertEqual(splitany(u'xyzxyzxyz', u'z'), u'xyzxyzxyz'.split(u'z'))\n self.assertEqual(splitany(b'xyzxyzxyz', u'z'), b'xyzxyzxyz'.split(b'z'))\n self.assertEqual(splitany(u'xyzxyzxyz', b'z'), u'xyzxyzxyz'.split(u'z'))\n self.assertEqual(splitany(b'xyzxyzxyz', b'z'), b'xyzxyzxyz'.split(b'z'))\n self.assertEqual(splitany(u'xyzxyzxyz', b'z', 0), u'xyzxyzxyz'.split(u'z', 0))\n self.assertEqual(splitany(b'xyzxyzxyz', u'z', 1), b'xyzxyzxyz'.split(b'z', 1))\n self.assertEqual(splitany(u'xyzxyzxyz', b'z', 2), u'xyzxyzxyz'.split(u'z', 2))\n self.assertEqual(splitany(b'xyzxyzxyz', b'z', 3), b'xyzxyzxyz'.split(b'z', 3))\n self.assertEqual(splitany(u'xyzxyzxyz', u'z', 4), u'xyzxyzxyz'.split(u'z', 4))\n\n # Three internal and trailing multi-char matches with unicode and binary combinations\n self.assertEqual(splitany(u'abc\\t\\tdef def\\n\\n'), u'abc\\t\\tdef def\\n\\n'.split())\n self.assertEqual(splitany(b'abc\\t\\tdef def\\n\\n'), b'abc\\t\\tdef def\\n\\n'.split())\n\n self.assertEqual(splitany(u'xyzxyzxyz', [u'yz']), u'xyzxyzxyz'.split(u'yz'))\n self.assertEqual(splitany(b'xyzxyzxyz', [u'yz']), b'xyzxyzxyz'.split(b'yz'))\n self.assertEqual(splitany(u'xyzxyzxyz', [b'yz']), u'xyzxyzxyz'.split(u'yz'))\n self.assertEqual(splitany(b'xyzxyzxyz', [b'yz']), b'xyzxyzxyz'.split(b'yz'))\n self.assertEqual(splitany(u'xyzxyzxyz', [b'yz'], 0), u'xyzxyzxyz'.split(u'yz', 0))\n self.assertEqual(splitany(b'xyzxyzxyz', [u'yz'], 1), b'xyzxyzxyz'.split(b'yz', 1))\n self.assertEqual(splitany(u'xyzxyzxyz', [b'yz'], 2), u'xyzxyzxyz'.split(u'yz', 2))\n self.assertEqual(splitany(b'xyzxyzxyz', [b'yz'], 3), b'xyzxyzxyz'.split(b'yz', 3))\n self.assertEqual(splitany(u'xyzxyzxyz', [u'yz'], 4), u'xyzxyzxyz'.split(u'yz', 4))\n\n # Test multiple inputs expecting unicode\n\n split_mult = u'abc:d:;|w:xy;|z'\n split_once = u'abc,d,,,w,xy,,z'\n\n # Test unicode without maxsplit option to ensure equivalence to str.split\n self.assertEqual(splitany(split_mult, u';|:'), split_once.split(u','))\n self.assertEqual(splitany(split_mult, b';|:'), split_once.split(u','))\n self.assertEqual(splitany(split_mult, [x for x in u';|:']), split_once.split(u','))\n self.assertEqual(splitany(split_mult, [b';', b'|', b':']), split_once.split(u','))\n self.assertEqual(splitany(split_mult, [b'|', u':', b';']), split_once.split(u','))\n\n for i in range(7):\n target = split_once.split(u',', i)\n\n # Test unicode with maxsplit option to ensure equivalence to str.split\n for sep in (u';|:', b';|:', [x for x in u';|:'], [b';', b'|', b':'], [b'|', u':', b';']):\n source = splitany(split_mult, sep, i)\n length = len(source[i])\n index = split_mult.index(source[i])\n\n self.assertEqual(source[:i], target[:i]) # Parsed portions are equal\n self.assertEqual(length, len(target[i])) # Remaining portions equal in length\n self.assertEqual(source[i], split_mult[index:]) # 
Remaining portion exists in original\n\n # Test multiple inputs expecting binary\n\n split_mult = b'abc:d:;|w:xy;|z'\n split_once = b'abc,d,,,w,xy,,z'\n\n # Test binary without maxsplit option to ensure equivalence to str.split\n self.assertEqual(splitany(split_mult, u';|:'), split_once.split(b','))\n self.assertEqual(splitany(split_mult, b';|:'), split_once.split(b','))\n self.assertEqual(splitany(split_mult, [x for x in u';|:']), split_once.split(b','))\n self.assertEqual(splitany(split_mult, [b';', b'|', b':']), split_once.split(b','))\n self.assertEqual(splitany(split_mult, [b'|', u':', b';']), split_once.split(b','))\n\n for i in range(7):\n target = split_once.split(b',', i)\n\n # Test binary with maxsplit option to ensure equivalence to str.split\n for sep in (u';|:', b';|:', [x for x in u';|:'], [b';', b'|', b':'], [b'|', u':', b';']):\n source = splitany(split_mult, sep, i)\n length = len(source[i])\n index = split_mult.index(source[i])\n\n self.assertEqual(source[:i], target[:i]) # Parsed portions are equal\n self.assertEqual(length, len(target[i])) # Remaining portions equal in length\n self.assertEqual(source[i], split_mult[index:]) # Remaining portion exists in original\n\n # Test multiple character inputs with overlapping separators, with unicode and binary\n\n self.assertEqual(splitany(u'aabdabcd', [u'a']), [u'', u'', u'bd', u'bcd'])\n self.assertEqual(splitany(u'aabdabcd', [b'a']), [u'', u'', u'bd', u'bcd'])\n self.assertEqual(splitany(b'aabdabcd', [u'a']), [b'', b'', b'bd', b'bcd'])\n self.assertEqual(splitany(b'aabdabcd', [b'a']), [b'', b'', b'bd', b'bcd'])\n\n self.assertEqual(splitany(u'aabdabcd', [u'ab', u'a']), [u'', u'', u'd', u'cd'])\n self.assertEqual(splitany(b'aabdabcd', [b'ab', u'a']), [b'', b'', b'd', b'cd'])\n self.assertEqual(splitany(u'aabdabcd', [u'a', b'ab']), [u'', u'', u'd', u'cd'])\n self.assertEqual(splitany(b'aabdabcd', [b'a', b'ab']), [b'', b'', b'd', b'cd'])\n\n self.assertEqual(splitany(u'aabdabcd', [b'abc', u'ab', b'a']), [u'', u'', u'd', u'd'])\n self.assertEqual(splitany(b'aabdabcd', [u'abc', b'ab', u'a']), [b'', b'', b'd', b'd'])\n self.assertEqual(splitany(u'aabdabcd', [u'a', b'ab', u'abc']), [u'', u'', u'd', u'd'])\n self.assertEqual(splitany(b'aabdabcd', [b'a', u'ab', b'abc']), [b'', b'', b'd', b'd'])", "def nextSplit(self):\n pass", "def test_hunsplit(self):\n convert3 = cnv()\n # normal case\n self.assertEqual(convert3.hunsplit('dreihundertvierzehn'), 314)\n # case no tens no ones\n self.assertEqual(convert3.hunsplit('zweihundert'), 200)\n # case no tens\n self.assertEqual(convert3.hunsplit('dreihundertundzwei'), 302)\n # case no ones\n self.assertEqual(convert3.hunsplit('vierhundertfünfzig'), 450)\n # case nothing before nothing behind\n self.assertEqual(convert3.hunsplit('hundert'), 100)", "def addSplit(self):\n pass", "def test_splitParam(self):\n params = [\n (\"FOO\", (\"FOO\", [\"\"])),\n (\"FOO=\", (\"FOO\", [\"\"])),\n (\"FOO=1\", (\"FOO\", [\"1\"])),\n (\"FOO=1,2,3\", (\"FOO\", [\"1\", \"2\", \"3\"])),\n (\"FOO=A\\\\x20B\", (\"FOO\", [\"A B\"])),\n (\"FOO=\\\\x5Cx\", (\"FOO\", [\"\\\\x\"])),\n (\"FOO=\\\\\", (\"FOO\", [\"\\\\\"])),\n (\"FOO=\\\\n\", (\"FOO\", [\"\\\\n\"])),\n ]\n\n _splitParam = irc.ServerSupportedFeatures._splitParam\n\n for param, expected in params:\n res = _splitParam(param)\n self.assertEqual(res, expected)\n\n self.assertRaises(ValueError, _splitParam, \"FOO=\\\\x\")\n self.assertRaises(ValueError, _splitParam, \"FOO=\\\\xNN\")\n self.assertRaises(ValueError, _splitParam, \"FOO=\\\\xN\")\n 
self.assertRaises(ValueError, _splitParam, \"FOO=\\\\x20\\\\x\")", "def test_missing_delim(self):", "def test_line_split():\n for _x in range(100):\n delim = choice((\"=\", \"|\", \",\", \"$\", \".\", \"/\"))\n l_str = delim.join([random_str(5, 10) for x in range(30)])\n line = Line(l_str, random_str(10, 20), randint(1, 10000))\n # Split the string\n l_parts = line.split(delim)\n exp_parts = l_str.split(delim)\n assert len(l_parts) == len(exp_parts)\n for l_part, x_part in zip(l_parts, exp_parts):\n assert isinstance(l_part, Line)\n assert l_part == x_part\n assert l_part.file == line.file\n assert l_part.number == line.number", "def test_missing_split_by_value():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n HISTOGRAM bigfoot X temperature_mid SPLIT BY\n \"\"\"\n\n # TODO Get a more specific exception here.\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)", "def test_splitDelimiters(self):\n r = irc.split(\"xx yyz\", 2)\n self.assertEqual([\"xx\", \"yy\", \"z\"], r)\n r = irc.split(\"xx\\nyyz\", 2)\n self.assertEqual([\"xx\", \"yy\", \"z\"], r)", "def test_without_separator():\n assert my_splitter(\"string with !@#$double spaces\") == \\\n [\"string\", \"with\", \"!@#$double\", \"spaces\"]", "def test_words_with_sep():\n assert my_splitter(\"bla,bla\", \",\") == [\"bla\", \"bla\"]", "def sanity_check(self):\n pass", "def test_get_parts(self):\n pass", "def split(self, X):", "def test_splitPartiesString(self):\n s = \"Appellant: Lucy Johnston - Respondent: Mary-Jane Lawrence\"\n expected = [\"Appellant: Lucy Johnston \",\" Respondent: Mary-Jane Lawrence\"]\n self.assertEqual(expected, split_parties.splitPartiesString(s))", "def test_split_seq(self):\r\n seq = 'AAAACCCCCGTGTGTGT'\r\n barcode, primer, remainder = split_seq(seq, 4, 5)\r\n self.assertEqual(barcode, 'AAAA')\r\n self.assertEqual(primer, 'CCCCC')\r\n self.assertEqual(remainder, 'GTGTGTGT')", "def testSplit(self):\r\n data={\r\n # 1\r\n 'emptyPath':\r\n ['',{'fileBase':'',\r\n 'fileExt':None,\r\n 'dirs':[]}],\r\n\r\n # 2\r\n 'fileBaseOnly':\r\n ['fileBase',{'fileBase':'fileBase',\r\n 'fileExt':None,\r\n 'dirs':[]}],\r\n \r\n # 3\r\n 'fileExtOnly':\r\n ['.ext',{'fileBase':'',\r\n 'fileExt':'ext',\r\n 'dirs':[]}],\r\n\r\n # 4\r\n 'fileBaseEmptyFileExt':\r\n ['fileBase.',{'fileBase':'fileBase',\r\n 'fileExt':'',\r\n 'dirs':[]}],\r\n\r\n # 5\r\n 'fullFileName':\r\n ['fileBase.ext',{'fileBase':'fileBase',\r\n 'fileExt':'ext',\r\n 'dirs':[]}],\r\n\r\n # 6\r\n 'singleDir':\r\n ['dir/',{'fileBase':'',\r\n 'fileExt':None,\r\n 'dirs':['dir']}],\r\n\r\n # 7\r\n 'twoDirs':\r\n ['dir1/dir2/',{'fileBase':'',\r\n 'fileExt':None,\r\n 'dirs':['dir1','dir2']}],\r\n\r\n # 8\r\n 'absolutePathTwoDirsFullFileName':\r\n ['/dir1/dir2/fileBase.ext',{'fileBase':'fileBase',\r\n 'fileExt':'ext',\r\n 'dirs':['','dir1','dir2']}],\r\n\r\n # 9\r\n 'dirWithAPeriod':\r\n ['/dir.dirExt/fileBase.fileExt',{'fileBase':'fileBase',\r\n 'fileExt':'fileExt',\r\n 'dirs':['','dir.dirExt']}]\r\n }\r\n\r\n for k in data.iterkeys():\r\n s1=ufsi.NativeUnixPath(data[k][0]).split()\r\n s2=data[k][1]\r\n for s2k in s2.iterkeys():\r\n self.assertEquals(s1[s2k],s2[s2k],\r\n '%s: Item %s of dict %r should be %s'\r\n %(k,s2k,s1,s2[s2k]))", "def test_two_chars_and_separator():\n assert my_splitter(\",J\", \",\") == [\"\", \"J\"]", "def test_undsplit(self):\n convert2 = cnv()\n # normal case where tens != 0 and ones != 0\n self.assertEqual(convert2.undsplit('einundzwanzig'), 21)\n # special case 1 only one word\n self.assertEqual(convert2.undsplit('fünf'), 5)\n 
self.assertEqual(convert2.undsplit('fünfzig'), 50)\n # special case 2 empty string\n self.assertEqual(convert2.undsplit(''), 0)", "def test_parse_devide(self):\n self.assertEqual(parse_input.parse([\"8\", \"/\", \"4\"]), 2)", "def test_check_http_url_split_validation():\n with pytest.raises(ValueError):\n http_urlsplit('https://aaa.cz')\n\n with pytest.raises(ValueError):\n http_urlsplit('ftp://ddd.cz')", "def main():\n\n try:\n dir = sys.argv[1].strip(os.sep)\n split = int(sys.argv[2])\n except IndexError:\n print __usage__\n sys.exit()\n\n if os.path.isdir(dir):\n splitDir(dir,split)\n else:\n err = \"%s is not a directory\" % dir\n raise IOError(err)", "def test_double_spaces():\n assert my_splitter(\"string with !@#$double spaces\", \" \") == \\\n [\"string\", \"\", \"with\", \"\", \"!@#$double\", \"\", \"spaces\"]", "def test_split_lib_transform(self):\r\n header = '>E2_1 FYI2DSB01B17QJ orig_bc=ATCACTAGTCAC new_bc=ATCACTAGTCAC bc_diffs=0'\r\n self.assertEqual(split_lib_transform(header),\r\n ('E2_1 read_id=FYI2DSB01B17QJ barcode=ATCACTAGTCAC', 'FYI2DSB01B17QJ'))", "def _validate_and_split_key(self, key):\n if self._len_keys == 1:\n return self._validate_and_split_len_one(key)\n else:\n return self._validate_and_split_len(key)", "def _setup_splits(self):\n #ntot = self.reredux_conf['nperfile']\n ntot = self.reredux_conf['Ngals']\n npersplit = self.runconf['nper']\n\n self.beglist, self.endlist = get_splits(ntot, npersplit)", "def test_default(self):\n fixed_splitter = utils.WidthSplitter((4, 3, 5, 6, 10, 10, 10, 10, 10, 10))\n line_full = \" 60 H 10 s 0.14639 0.00000 0.00000 -0.00000 -0.00000 0.00000\"\n line_truncated = \" 1 C 1 s -0.00000 -0.00000 0.00000\"\n ref_full = ['60', 'H', '10', 's', '0.14639', '0.00000', '0.00000', '-0.00000', '-0.00000', '0.00000']\n ref_truncated = ['1', 'C', '1', 's', '-0.00000', '-0.00000', '0.00000']\n tokens_full = fixed_splitter.split(line_full)\n tokens_truncated = fixed_splitter.split(line_truncated)\n self.assertEqual(ref_full, tokens_full)\n self.assertEqual(ref_truncated, tokens_truncated)", "def test_string_ends_with_sep():\n assert my_splitter(\"aaa,bbb,\", \",\") == [\"aaa\", \"bbb\", \"\"]", "def test_splitParamArgs(self):\n res = irc.ServerSupportedFeatures._splitParamArgs([\"A:1\", \"B:2\", \"C:\", \"D\"])\n self.assertEqual(res, [(\"A\", \"1\"), (\"B\", \"2\"), (\"C\", \"\"), (\"D\", \"\")])", "def make_split_data(read_data):\n split_data = re.split('[,|\\.|\\-|\\*|\\[|\\]|\\#|\\:|\\;|(\\|)|\\\"|\\'|!|\\s]+',read_data)\n\n if split_data[-1] == '':\n del split_data[-1]\n\n return split_data", "def test_split_string(self):\n mytext = '2011 Senior PGA Championship presented by'\n string1, string2 = split_string(mytext, 25, 25)\n self.assertEqual(string1, '2011 Senior PGA')\n self.assertEqual(string2, 'Championship presented')", "def test_no_truncation(self):\n fixed_splitter = utils.WidthSplitter((4, 3, 5, 6, 10, 10, 10, 10, 10, 10))\n line = \" 1 C 1 s -0.00000 -0.00000 0.00000\"\n ref_not_truncated = ['1', 'C', '1', 's', '-0.00000', '-0.00000', '0.00000', '', '', '']\n tokens_not_truncated = fixed_splitter.split(line, truncate=False)\n self.assertEqual(ref_not_truncated, tokens_not_truncated)", "def test_check_http_url_split(url, expected_split):\n assert http_urlsplit(url) == expected_split", "def sanity_check(self):\n return True", "def split(self) -> List[String]:\n pass", "def test_splitParamArgsProcessor(self):\n res = irc.ServerSupportedFeatures._splitParamArgs(\n [\"A:1\", \"B:2\", \"C\"], irc._intOrDefault\n )\n 
self.assertEqual(res, [(\"A\", 1), (\"B\", 2), (\"C\", None)])", "def test_split(self):\n array = np.arange(1000)\n df = DataFlow.from_numpy(array)\n\n # first, test throw errors on invalid arguments\n def assert_invalid_arg(**kwargs):\n with self.assertRaises(ValueError):\n df.split(**kwargs)\n assert_invalid_arg(partitions=[])\n assert_invalid_arg(partitions=[1000, 1])\n assert_invalid_arg(partitions=[1000, -1])\n assert_invalid_arg(partitions=[1, 2])\n assert_invalid_arg(portions=[])\n assert_invalid_arg(portions=[1.0, 0.1])\n assert_invalid_arg(portions=[1.0, -1])\n assert_invalid_arg(portions=[0.1, 0.2])\n\n # next, test split without shuffling\n df1, df2, df3 = df.split(partitions=[700, 200, 100])\n np.testing.assert_array_equal(df1.all()[0], array[:700])\n np.testing.assert_array_equal(df2.all()[0], array[700:900])\n np.testing.assert_array_equal(df3.all()[0], array[900:1000])\n df1, df2, df3 = df.split(portions=[-1, 0.2, 0.1])\n np.testing.assert_array_equal(df1.all()[0], array[:700])\n np.testing.assert_array_equal(df2.all()[0], array[700:900])\n np.testing.assert_array_equal(df3.all()[0], array[900:1000])\n\n # finally, test split with shuffling\n df1, df2 = df.split(portions=[0.5, -1], shuffle=True)\n self.assertEquals(len(df1), 500)\n self.assertEquals(len(df2), 500)\n df_array = np.concatenate([df1.all()[0], df2.all()[0]], axis=0)\n self.assertFalse(np.all(df_array == array))\n np.testing.assert_array_equal(np.sort(df_array), array)", "def test_tsdsplit(self):\n convert4 = cnv()\n # normal case\n self.assertEqual(convert4.tsdsplit('zweihunderteinundzwanzigtausendvierhundertdreiundfünfzig'),\n 221453)\n # case nothing behind\n self.assertEqual(convert4.tsdsplit('zweihundertfünfzehntausend'), 215000)\n # case nothing in front\n self.assertEqual(convert4.tsdsplit('tausendvierhundertfünfzig'), 1450)\n # case nothing in front nothing behind\n self.assertEqual(convert4.tsdsplit('tausend'), 1000)", "def test_split_data():\n from parrot import process_input_data as pid\n\n data_file = os.path.abspath(\"../data/seq_class_dataset.tsv\")\n train, val, test = pid.split_data(data_file, datatype='sequence',\n problem_type='classification', num_classes=3)\n\n assert (len(train) == 210) and (len(val) == 45) and (len(test) == 45) and (len(train[0]) == 3)", "def test_process_chunk_trimmed(self):\n foo = None\n chunk, order = self.chunk_procesor.process(foo, 1, 1, 1)\n expected = (12, 2, 14, 1)\n self.assertEqual(expected, chunk.shape)", "def test_split_feature(tree):\r\n print(\"test_split_feature()...\", end = \"\")\r\n assert (tree.process_split_feature() == True)\r\n print(\"Passed!\")", "def create_split(self) -> NoReturn:\n raise NotImplementedError", "def set_split(self):\n #Regular expressions; try 1 first, then 2, etc.\n rex1 = re.compile('F?LD')\n rex2 = re.compile('[LF]?LQ')\n \n #For regular expression, check if there is a match that is >10 AA from the end\n if re.search(rex1, self.sequence) and len(re.split(rex1, self.sequence)[-1]) > 10:\n start, end = [m.span() for m in rex1.finditer(self.sequence)][-1]\n# end += 16 #TODO why +15/16?\n elif re.search(rex2, self.sequence) and len(re.split(rex2,self.sequence)[-1]) > 10:\n start, end = [m.span() for m in rex2.finditer(self.sequence)][-1]\n# end += 15\n else:\n self.split_index = -1\n self.core = self.sequence\n self.leader = ''\n return\n self.split_index = end\n self.leader = self.sequence[:end]\n self.core = self.sequence[end:]", "def split_line_robust(line):\n\n line_split0 = [x.rstrip('\\n') for x in line.split(' ') if x]\n 
line_split1 = [x.split('\\t') for x in line_split0 if x]\n line_split = []\n for l_one in line_split1:\n for l_two in l_one:\n if l_two: line_split.append(l_two)\n return(line_split)", "def test_milsplit(self):\n convert5 = cnv()\n # normal case\n self.assertEqual(\n convert5.milsplit('einemilliondreihundertvierundzwanzigtausendsiebenhundertneunundachtzig'),\n 1324789)\n # case nothing behind\n self.assertEqual(convert5.milsplit('fünfundzwanzigmillionen'), 25000000)\n # case nothing in front\n self.assertEqual(convert5.milsplit('millionundzwei'), 1000002)\n # case nothing in front nothing behind\n self.assertEqual(convert5.milsplit('million'), 1000000)", "def splitCount(self):\n return 0", "def splitlines(self) -> List[String]:\n pass", "def test_splitlist():\n lst = [4, 2, 3, 1, 6, 7]\n lt, pi, gt = splitlist(lst)\n if lt == [2, 3, 1] and pi == 4 and gt == [6, 7]:\n print(\"test splitlist OK!\")\n else:\n print(\"test splitlist Failed!\")", "def test_MinimalSamParser(self):\r\n actual = list(MinimalSamParser(self.sam_data1))\r\n expected = self.sam1_expected\r\n self.assertEqual(actual, expected)", "def test_re_split_cell_raises_error(mock_amg):\n\n # splitting once should work as normal\n mock_amg.cells[4].split()\n\n with pytest.raises(ValueError):\n mock_amg.cells[4].split()", "def data_process(self):\n logging.info('Processing the data and split files')\n lines = Utility.file_len(self.fname)\n self.lines_to_be, self.split_files = Utility.split_files(self.fname, lines,\n cpu_count().real)", "def testSplit(self):\n\n protocol_factory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory()\n bigstring = \"\".join(chr(byte) for byte in range(ord(\"a\"), ord(\"z\")+1))\n\n databuf = TTransport.TMemoryBuffer()\n prot = protocol_factory.getProtocol(databuf)\n prot.writeI32(42)\n prot.writeString(bigstring)\n prot.writeI16(24)\n data = databuf.getvalue()\n cutpoint = len(data)/2\n parts = [ data[:cutpoint], data[cutpoint:] ]\n\n framed_buffer = TTransport.TMemoryBuffer()\n framed_writer = TTransport.TFramedTransport(framed_buffer)\n for part in parts:\n framed_writer.write(part)\n framed_writer.flush()\n self.assertEquals(len(framed_buffer.getvalue()), len(data) + 8)\n\n # Recreate framed_buffer so we can read from it.\n framed_buffer = TTransport.TMemoryBuffer(framed_buffer.getvalue())\n framed_reader = TTransport.TFramedTransport(framed_buffer)\n prot = protocol_factory.getProtocol(framed_reader)\n self.assertEqual(prot.readI32(), 42)\n self.assertEqual(prot.readString(), bigstring)\n self.assertEqual(prot.readI16(), 24)", "def test_split_arguments_and_remove_quotes(self, unused_test_name,\n input_string, expected):\n result = shlex.split(input_string)\n self.assertEqual(result, expected)", "def split(self, place_leaf_splitted):\n raise NotImplementedError", "def test_split_version(self):\n\n given = \"1.0.0.dev (Hello, World!)\"\n expected = [\"1\", \"0\", \"0\"]\n actual = Version.split_versions(given)\n\n self.assertEqual(expected, actual)", "def test_split_line(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first\n line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.5\", \"1.5\"),\n after_sel=(\"2.0\", \"2.0\"),\n command_name=\"split-line\",\n )", "def test_get_items_from_string() -> None:\n assert [\"i\", \"p\"] == common_util.get_items_from_string(\"i, ,p\")\n assert [\"i\", \"p\"] == common_util.get_items_from_string(\"i- -p\", 
separator=\"-\")\n assert [\"i\", \" \", \" p\"] == common_util.get_items_from_string(\"i, , p\", remove_blanks=False)\n assert [\"i\", \"p\"] == common_util.get_items_from_string(\"i, , p\")\n assert [] == common_util.get_items_from_string(\"\")", "def is_split_token(self):\n return '/' in self.token", "def prepare_data(self, lines: List[str]) -> List[str]:\n if self.is_tokenized:\n if self.parser == \"spacy\":\n lines = [l.split() for l in lines]\n elif self.parser == \"udpipe\":\n lines = [[l.split()] for l in lines]\n\n return lines", "def test_basic_parsers():", "def test_split_by_predicates_is_empty(self):\n integers = [1, 2, 3, 4]\n predicates = []\n\n r = tuple(multi_split_by(integers, predicates))\n self.assertEqual(1 + len(predicates), len(r))\n\n a, = r\n self.assertIsNotNone(a)\n a = _consume(a)\n self.assertEqual([1, 2, 3, 4], a)", "def test_probabilistic_parsers():", "def guess_splitwords():\n\n if t_word[:2] == 'un' and (t_pos == 'ADJD' or t_pos == 'ADJA'):\n create_splitword_tags(t_word[:2], t_word[2:])\n create_negation_frame()\n create_splitword_target(t_word[:2])\n create_splitword_focus(t_word[2:])\n create_splitword_negated(t_word[2:])\n create_splitword_scope(t_word[2:])", "def split(input, output, fields, delimiter, encoding, verbose, format_in, zipfile, gzipfile, chunksize, filter):\n if verbose:\n enableVerbose()\n options = {}\n options['delimiter'] = delimiter\n options['fields'] = fields\n options['output'] = output\n options['encoding'] = encoding\n options['format_in'] = format_in\n options['zipfile'] = zipfile\n options['gzipfile'] = gzipfile\n options['chunksize'] = chunksize\n options['filter'] = filter\n acmd = Selector()\n acmd.split(input, options)\n pass", "def test_no_arg(self):\n self.assertRaises(ValueError, NewickTokenizer)", "def test_data_split_bad():\n\tdf = pd.read_csv(\"test/sample_data.csv\")\n\tsplit_data = {'test_size': 0.4, 'random_state': 0}\n\n\twith pytest.raises(KeyError) as e:\n\t\tX_train, X_test, y_train, y_test = data_split(df, 'orange', split_data)\n\n\tassert str(e.value) == \"'Target column does not exist!'\"", "def safe_split(self, text):\n try:\n words = self.shlex_split(text)\n return words\n except:\n return text", "def test_whitespace(self):\n self.assertRaises(ParseException, self.flag.parseString, ' ')", "def test_wordMatch(self):\n words = []\n for line in self.output:\n words.extend(string.split(line))\n self.failUnless(self.sampleSplitText == words)", "def test_splits(self):\n \n checkit=subprocess.run([\"python\", \"../../taxonomy/src_files/write_qiime_train_db.py\", \"-i\", \"../resource_files/spaces.fna\", \"-o\", \"../processed_files/test_spaces.fna\"], capture_output=True, text=True)\n \n get_processed=os.listdir(\"../processed_files\")\n self.assertIn(\"test_spaces.fna\", get_processed)\n \n with open(\"../processed_files/test_spaces.fna\", 'r') as f:\n lines=f.readlines()\n \n self.assertEqual(lines[0].strip(), \">GY203941.1.1493\tBacteria;Bacteroidetes;Bacteroidia;Bacteroidales;Prevotellaceae;Prevotella_7;unidentified\")\n \n os.remove(\"../processed_files/test_spaces.fna\")", "def test_simple_parse(self):\n pass", "def test_wordCount(self):\n words = []\n for line in self.output:\n words.extend(string.split(line))\n wordCount = len(words)\n sampleTextWordCount = len(self.sampleSplitText)\n self.failUnlessEqual(wordCount, sampleTextWordCount)", "def test_no_start_open_parens(self):\n self.assertRaises(ValueError, NewickTokenizer, newick='hi')", "def test_no_chunk_size_no_n_splits_provided(self):\n with 
self.assertRaises(ValueError):\n next(chunk_tasks([]))", "def validate_string_split(self, pattern, file):\r\n try:\r\n file_open = open(file, 'r')\r\n except:\r\n logging.info(\"file not found\")\r\n return -1\r\n file_data = file_open.read()\r\n ret_out = re.split(pattern, file_data)\r\n if ret_out:\r\n return True, ret_out\r\n else:\r\n return False, ret_out", "def _pre_argument_parsing(self):\n pass", "def _test_parts(self):\n\n message = []\n\n if self._year is None:\n message.append(\"No year\")\n else:\n try:\n int(self._year)\n except:\n message.append(\"Bad year {}\".format(self._year))\n\n if not self._release:\n message.append(\"No release\")\n else:\n try:\n assert (int(self._release) in [1, 5])\n except:\n message.append(\"Bad release {}\".format(self._release))\n\n if not self._state:\n message.append(\"No state\")\n elif len(self._state) != 2:\n message.append(\"Bad state {}\".format(self._state))\n\n if not self._record_type:\n message.append(\"No record_type\")\n else:\n try:\n assert(self._record_type.upper()[0] in ['H','P'])\n except:\n message.append(\"Bad record type {}\".format(self._record_type))\n\n return message", "def test_read_line(self):\n\n expected_data = ['\\\"lu, jr\\\"','ming-yuan','\\\"DRUG,1\\\"',135.999,True,3]\n input_string = '001,\\\"LU, JR\\\",MING-YUAN,\\\"DRUG,1\\\",135.999\\n'\n data = read_line(input_string)\n self.assertEqual(expected_data[0],data[0])\n self.assertEqual(expected_data[1],data[1])\n self.assertEqual(expected_data[2],data[2])\n self.assertAlmostEqual(expected_data[3],data[3])\n self.assertEqual(expected_data[4],data[4])\n self.assertAlmostEqual(expected_data[5],data[5])\n\n #Check for odd numers of quotation marks\n input_string = '001,\\\"LU\\\",\\\"MING-YUAN,DRUG1,135\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n #Check for missing fields\n input_string = '001,,MING-YUAN,DRUG1,135\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n input_string = '001,LU,MING-YUAN,DRUG1,\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n #Check for corrupted fields\n input_string = '001x,LU,MING-YUAN,DRUG1,135\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n input_string = '001,LU,MING-YUAN,DRUG1,1ag5\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])", "def split(self) -> Optional[str]:\n error_message: Optional[str] = None\n\n first_hand_cards: List[Card] = self.hands[0].cards\n if self.bet * 2 > self.actual_money:\n error_message = \"Cannot split because you have not enough money!\"\n\n elif len(self.hands) == 2:\n error_message = \"Cannot split because you have already splitted!\"\n\n elif len(first_hand_cards) != 2:\n error_message = \"Cannot split because you have already hit!\"\n\n elif first_hand_cards[0].name != first_hand_cards[1].name:\n error_message = \"Cannot split because your cards are not the same!\"\n\n else:\n self.bet *= 2\n\n cards: List[Card] = [first_hand_cards.pop(), self._deck.deal_card()]\n self._hands.append(Hand(deck=self._deck, from_cards=cards))\n\n self._hands[0].deal_card()\n\n return error_message", "def test_explode_basic(base_clumper):\n assert len(base_clumper.explode(d=\"data\")) == 2 * len(base_clumper)", "def testA_FileSplitting(self):\n splitter = SplitterFactory()\n\n oneSetSubscription = self.createSubscription(nFiles=10, lumisPerFile=1)\n jobFactory = splitter(package=\"WMCore.WMBS\", subscription=oneSetSubscription)\n\n jobGroups = jobFactory(lumis_per_job=3, halt_job_on_file_boundaries=True, 
performance=self.performanceParams)\n self.assertEqual(len(jobGroups), 1)\n self.assertEqual(len(jobGroups[0].jobs), 10)\n for job in jobGroups[0].jobs:\n self.assertTrue(len(job['input_files']), 1)\n self.assertEqual(job['estimatedJobTime'], 100 * 12)\n self.assertEqual(job['estimatedDiskUsage'], 100 * 400)\n self.assertEqual(job['estimatedMemoryUsage'], 2300)\n\n twoLumiFiles = self.createSubscription(nFiles=5, lumisPerFile=2)\n jobFactory = splitter(package=\"WMCore.WMBS\", subscription=twoLumiFiles)\n jobGroups = jobFactory(lumis_per_job=1, halt_job_on_file_boundaries=True, performance=self.performanceParams)\n self.assertEqual(len(jobGroups), 1)\n self.assertEqual(len(jobGroups[0].jobs), 10)\n for job in jobGroups[0].jobs:\n self.assertEqual(len(job['input_files']), 1)\n self.assertEqual(job['estimatedJobTime'], 50 * 12)\n self.assertEqual(job['estimatedDiskUsage'], 50 * 400)\n self.assertEqual(job['estimatedMemoryUsage'], 2300)\n\n wholeLumiFiles = self.createSubscription(nFiles=5, lumisPerFile=3)\n jobFactory = splitter(package=\"WMCore.WMBS\", subscription=wholeLumiFiles)\n jobGroups = jobFactory(lumis_per_job=2, halt_job_on_file_boundaries=True, performance=self.performanceParams)\n self.assertEqual(len(jobGroups), 1)\n # 10 because we split on run boundaries\n self.assertEqual(len(jobGroups[0].jobs), 10)\n jobList = jobGroups[0].jobs\n for idx, job in enumerate(jobList, start=1):\n # Have should have one file, half two\n self.assertEqual(len(job['input_files']), 1)\n if idx % 2 == 0:\n self.assertEqual(job['estimatedJobTime'], (1.0 * round(100 / 3)) * 12)\n self.assertEqual(job['estimatedDiskUsage'], (1.0 * round(100 / 3)) * 400)\n else:\n self.assertEqual(job['estimatedJobTime'], (2.0 * round(100 / 3)) * 12)\n self.assertEqual(job['estimatedDiskUsage'], (2.0 * round(100 / 3)) * 400)\n self.assertEqual(job['estimatedMemoryUsage'], 2300)\n\n mask0 = jobList[0]['mask'].getRunAndLumis()\n self.assertEqual(mask0, {0: [[0, 1]]})\n mask1 = jobList[1]['mask'].getRunAndLumis()\n self.assertEqual(mask1, {0: [[2, 2]]})\n mask2 = jobList[2]['mask'].getRunAndLumis()\n self.assertEqual(mask2, {1: [[100, 101]]})\n mask3 = jobList[3]['mask'].getRunAndLumis()\n self.assertEqual(mask3, {1: [[102, 102]]})\n\n j0 = Job(id=jobList[0]['id'])\n j0.loadData()\n self.assertEqual(j0['mask'].getRunAndLumis(), {0: [[0, 1]]})\n\n # Do it with multiple sites\n twoSiteSubscription = self.createSubscription(nFiles=5, lumisPerFile=2, twoSites=True)\n jobFactory = splitter(package=\"WMCore.WMBS\",\n subscription=twoSiteSubscription)\n jobGroups = jobFactory(lumis_per_job=1,\n halt_job_on_file_boundaries=True,\n performance=self.performanceParams)\n self.assertEqual(len(jobGroups), 2)\n self.assertEqual(len(jobGroups[0].jobs), 10)\n for job in jobGroups[0].jobs:\n self.assertEqual(len(job['input_files']), 1)\n self.assertEqual(job['estimatedJobTime'], 50 * 12)\n self.assertEqual(job['estimatedDiskUsage'], 50 * 400)\n self.assertEqual(job['estimatedMemoryUsage'], 2300)", "def test_general_subset_invalid_space():\n pass", "def test_examples():\n assert nz_bank_validate(*'01-902-0068389-00'.split('-'))\n assert nz_bank_validate(*'08-6523-1954512-001'.split('-'))\n assert nz_bank_validate(*'26-2600-0320871-032'.split('-'))", "def test_process_data(self):\n pass", "def test_n_splits(self):\n for n_splits, expected_n_chunks in [(1, 1), (3, 3), (150, 100)]:\n with self.subTest(n_splits=n_splits):\n iterable_of_args, iterable_len, chunk_size, n_splits_ = apply_numpy_chunking(\n self.test_data_numpy, n_splits=n_splits\n 
)\n\n # Materialize generator and test contents. We simply test if every row of the original input occurs in\n # the chunks\n iterable_of_args = list(iterable_of_args)\n self.assertEqual(len(iterable_of_args), expected_n_chunks)\n offset = 0\n for chunk in iterable_of_args:\n self.assertIsInstance(chunk[0], np.ndarray)\n np.testing.assert_array_equal(chunk[0], self.test_data_numpy[offset:offset + len(chunk[0])])\n offset += len(chunk[0])\n self.assertEqual(offset, 100)\n\n # Test other output\n self.assertEqual(iterable_len, expected_n_chunks)\n self.assertEqual(chunk_size, 1)\n self.assertIsNone(n_splits_)\n\n # chunk_size and n_splits can't be both None\n with self.subTest(n_splits=None), self.assertRaises(ValueError):\n iterable_of_args, *_ = apply_numpy_chunking(self.test_data_numpy, n_splits=None)\n list(iterable_of_args)", "def test_split_by_iterable_is_empty(self):\n integers = []\n predicates = [predicate_1, predicate_2]\n\n r = list(multi_split_by(integers, predicates))\n self.assertEqual(1 + len(predicates), len(r))\n\n a, b, c = r\n self.assertIsNotNone(a)\n self.assertIsNotNone(b)\n self.assertIsNotNone(c)\n\n a = _consume(a)\n b = _consume(b)\n c = _consume(c)\n\n self.assertEqual([], a)\n self.assertEqual([], b)\n self.assertEqual([], c)", "def __check(self):\n assert self.name is not None, \"Empty name!\"\n assert self.in_spc, \"Empty in_spc!\"\n assert self.out_spc, \"Empty out_spc!\"\n assert self.num_clss > 0, \"Invalid number of output classes!\"\n if not isinstance(self, SvmSklearnWrapper):\n assert self.los_fnc is not None, \"No loss function!\"\n assert self.opt is not None, \"No optimizer!\"", "def test_data_split(irregular_written_data, split_data):\n\n unsplit_fp, _ = irregular_written_data\n with openEDF(unsplit_fp) as reader:\n unsplit_data = reader.read(start=0)\n\n for fp, chs in split_data.items():\n with openEDF(fp) as reader:\n arr = reader.read(start=0)\n\n # since sample rates differ we go to max shape\n nsamples = arr.shape[-1]\n assert np.allclose(arr, unsplit_data[chs, :nsamples], equal_nan=True)", "def test_split(range_size, partition_size):\n dump = Mock()\n\n iterable = list(range(range_size))\n\n list(_split(partition_size=partition_size, dump=dump, iterable=iterable))\n expected_call_count = (range_size // partition_size) + int(bool(range_size % partition_size))\n\n assert dump.call_count == expected_call_count" ]
[ "0.7558807", "0.69345814", "0.6754861", "0.6698345", "0.6582555", "0.6470959", "0.644443", "0.6433854", "0.6428578", "0.63183737", "0.63103896", "0.6191666", "0.61298126", "0.60792583", "0.6011742", "0.5989836", "0.5981029", "0.5925554", "0.589626", "0.58922535", "0.58776075", "0.58716273", "0.5863586", "0.5818711", "0.5815915", "0.5795804", "0.5782531", "0.5776352", "0.57734954", "0.5738207", "0.57319313", "0.5709898", "0.5666285", "0.5662646", "0.56509906", "0.5650848", "0.55990946", "0.55941033", "0.5593793", "0.5589496", "0.55788517", "0.5555071", "0.55485106", "0.55395734", "0.548129", "0.5465612", "0.54593813", "0.54579175", "0.54475725", "0.54116976", "0.54113185", "0.54025364", "0.539834", "0.5391081", "0.53844166", "0.5353604", "0.5335514", "0.53093314", "0.52960575", "0.52923787", "0.5285193", "0.52666104", "0.5262211", "0.52575463", "0.5256226", "0.52502304", "0.52460146", "0.5231034", "0.52223015", "0.5210858", "0.5204601", "0.52038896", "0.51972115", "0.5196417", "0.5194061", "0.5185583", "0.518465", "0.51743686", "0.51707625", "0.51706475", "0.5153924", "0.5150966", "0.5147721", "0.5144716", "0.5137135", "0.51339597", "0.51312804", "0.5115353", "0.51076823", "0.5093254", "0.50914216", "0.50897956", "0.5084688", "0.5077445", "0.5072433", "0.50641954", "0.5050221", "0.5049501", "0.5046819", "0.504341", "0.5036675" ]
0.0
-1
Basic checks for columns and rows values
def validate_image_col_row(image, col, row):
    SPLIT_LIMIT = 99

    try:
        col = int(col)
        row = int(row)
    except BaseException:
        raise ValueError("columns and rows values could not be cast to integer.")

    if col < 1 or row < 1 or col > SPLIT_LIMIT or row > SPLIT_LIMIT:
        raise ValueError(
            f"Number of columns and rows must be between 1 and "
            f"{SPLIT_LIMIT} (you asked for rows: {row} and col: {col})."
        )
    if col == 1 and row == 1:
        raise ValueError("There is nothing to divide. You asked for the entire image.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getValidRowsCols(self) :\n colns = number_of_good_cols(self.r_sheet)\n rowns = number_of_good_rows(self.r_sheet)\n \n # Check whether the number of good columns and rows are correct\n while self.isEmptyRow(rowns-1, colns) :\n rowns = rowns - 1 \n while self.isEmptyColumn(colns-1, rowns) :\n colns = colns - 1\n \n self.log.debug('Number of rows with content: {0}'.format(rowns))\n self.log.debug('Number of columns with content: {0}'.format(colns))\n return rowns, colns", "def isValid(self):\n for ir in range(self.nRow): # Check rows for duplicates\n row = ir + 1\n vals = {}\n for ic in range(self.nCol):\n col = ic + 1\n val = self.getCellVal(row=row, col=col)\n if not self.isEmpty(val):\n if val in vals:\n SlTrace.lg(f\"doing row {row} at col={col} val={val} vals={vals} invalid\")\n SlTrace.lg(f\"row:{row} vals: {self.getRowVals(row)} col:{col} vals: {self.getColVals(col)}\")\n return False\n vals[val] = val\n \n for ic in range(self.nCol): # Check cols for duplicates\n col = ic + 1\n vals = {}\n for ir in range(self.nRow):\n row = ir + 1\n val = self.getCellVal(row=row, col=col)\n if not self.isEmpty(val):\n if val in vals:\n SlTrace.lg(f\"at row={row} doing col={col} val={val} vals={vals} invalid\")\n SlTrace.lg(f\"row:{row} vals: {self.getRowVals(row)} col:{col} vals: {self.getColVals(col)}\")\n return False\n vals[val] = val\n return True", "def check_cols(self):\r\n for i in range(3):\r\n if self.grid[i][-1] != ' ' and self.grid[i][-1] == self.grid[i+3][-1] and self.grid[i+3][-1] == self.grid[i+6][-1]:\r\n return (i, (self.grid[i], self.grid[i+6]))\r\n return (-1, None)", "def check_row(row):\n \n if len(row) != _ncols:\n raise ValueError(\"Row contains {0} columns, expected {1}!\\n\\n{2}\\n\".format(len(row), _ncols, row))", "def checkRows( self ):\n\n for x in [0,3,6]:\n firstVal = self.__grid[x]\n secondVal = self.__grid[x+1]\n thirdVal = self.__grid[x+2]\n\n compiledVal = str(firstVal) + str(secondVal) + str(thirdVal)\n\n if 'xx' in compiledVal.lower():\n\n return ('X', compiledVal)\n\n elif 'oo' in compiledVal.lower():\n\n return ('O', compiledVal) \n\n elif compiledVal.lower() == 'x2x' or \\\n compiledVal.lower() == 'x5x' or \\\n compiledVal.lower() == 'x8x':\n\n return ('X', compiledVal)\n \n return None", "def validate_data(self, row, col, value):\n\n return True", "def check_full_board(self): #rows then columns\n for row in self.board:\n for column_of_row in row:\n if column_of_row == ' ':\n return False\n return True", "def is_valid(self):\n if self.get_row() != -1 and self.get_column() != -1:\n return True\n else:\n return False", "def test_multiple(self):\n df = self.df.copy()\n out = get_full_column(df.values)\n self.assertTrue(out == 0)", "def _row_or_col_is_header(s_count, v_count):\n if s_count == 1 and v_count == 1:\n return False\n else:\n return (s_count + 1) / (v_count + s_count + 1) >= 2. 
/ 3.", "def valid_input(self, row, col):\n return ((row, col) not in self.marks and\n row <= WIDTH and row > 0 and\n col in COL_MAP)", "def is_valid_row_or_col(val: str):\n try:\n val = int(val)\n if 1 <= val <= 10:\n return True\n return False\n except (ValueError, TypeError):\n return False", "def is_cols_valid(bd):\n for col in cols:\n seen = []\n for num in nums:\n if bd[col[num]] == \" \":\n continue\n elif bd[col[num]] not in seen:\n seen += [bd[col[num]]]\n else:\n return False\n else:\n continue\n return True", "def check_columns(self):\n\t\ti=0\n\t\tfor i in range(len(self.board[i])):\n\t\t\tpts = 0\n\t\t\tfor j in range(len(self.board)):\n\t\t\t\tif self.board[j][i] == self.marker:\n\t\t\t\t\tpts+=1\n\t\t\tif pts == 3:\n\t\t\t\tprint('YOU WON')\n\t\t\t\treturn True", "def check_columns():\n global game_still_going\n # Check if any of the rows have all the same value.\n column1 = board[0] == board[3] == board[6] != '_'\n column2 = board[1] == board[4] == board[7] != '_'\n column3 = board[2] == board[5] == board[8] != '_'\n # If any column does have a match, then game still going to False.\n if column1 or column2 or column3:\n game_still_going = False\n # Return winner 'X' or 'O'.\n if column1:\n return board[0]\n if column2:\n return board[1]\n if column3:\n return board[2]", "def is_posssible_col(self,col,user_value):\n for row in range(9):\n if self.arr[row][col] == user_value:\n logging.debug(f\"is_posssible_col row(): (False) row: {row} col: {col} arr{self.arr[row][col]} == {user_value}\")\n return False\n logging.debug(f\"is_posssible_col row(): (True) row: {row} col: {col} arr{self.arr[row][col]} != {user_value}\")\n return True", "def test_row_from_columns_no_errors(self):\n errors_on_separate_row = True\n field_setup = None\n error_names = ['non-field_name', 'not_a_field']\n for as_type in ('p', 'ul', 'fieldset'):\n setup = self.setup_row_from_columns(as_type, field_setup, error_names, errors_on_separate_row)\n for row in setup:\n self.assertEqual(len(row['expected']), 1)\n self.assertEqual(len(row['actual']), 1)\n self.assertEqual(row['expected'], row['actual'])", "def validate(self, row):\n raise NotImplementedError", "def validRowCol(content,start,row,schedule):\n\t\t\tif validRow(content,start,row) and \\\n\t\t\t\tvalidCol(content,start,schedule):\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False", "def checkColumns( self ):\n\n for x in list(range(0,3)):\n firstVal = self.__grid[x]\n secondVal = self.__grid[x+3]\n thirdVal = self.__grid[x+6]\n\n compiledVal = str(firstVal) + str(secondVal) + str(thirdVal)\n\n if 'xx' in compiledVal.lower():\n return ('X', compiledVal)\n\n elif 'oo' in compiledVal.lower():\n return ('O', compiledVal)\n\n elif compiledVal.lower() == 'x4x' or \\\n compiledVal.lower() == 'x5x' or \\\n compiledVal.lower() == 'x6x':\n\n return ('X', compiledVal) \n\n return None", "def check(self):\n for row in self.grid:\n for i in range(1, 10):\n if row.count(i) != 1:\n return False\n\n for col in range(9):\n lst = [row[col] for row in self.grid]\n for i in range(1, 10):\n if lst.count(i) != 1:\n return False\n \n for i in range(3):\n for j in range(3):\n lst = [row[j* 3:(j*3) + 3] for row in self.grid[i * 3:(i*3) + 3]] \n flat_list = []\n for k in lst:\n for number in k:\n flat_list.append(number)\n \n for check_number in range(1, 10):\n if flat_list.count(check_number) != 1:\n return False\n return True", "def is_valid_position(self, somerow, somecol):\n valid_row = 0 <= somerow <= (self.size-1)\n valid_col = 0 <= somecol <= (self.size-1)\n #need 
to use self.size - 1 because while we're thinking of an 8x8 chess board, the computer is thinking of a 0x7 chess board\n return valid_row and valid_col", "def isEmptyRow(self, i, colns):\n for j in range(0,colns) :\n if not self.isEmpty(i,j):\n return False\n return True", "def test_row_from_columns_no_errors_table(self):\n errors_on_separate_row = True\n field_setup = None\n error_names = ['non-field_name', 'not_a_field']\n for as_type in ('p', 'ul', 'fieldset'):\n setup = self.setup_row_from_columns(as_type, field_setup, error_names, errors_on_separate_row)\n for row in setup:\n self.assertEqual(len(row['expected']), 1)\n self.assertEqual(len(row['actual']), 1)\n self.assertEqual(row['expected'], row['actual'])", "def check(l):\n rows = split_rows(l)\n columns = split_columns(l)\n for r in rows + columns:\n if 0 in r:\n continue\n if sum(r) != row_sum:\n return False\n return True", "def check_square(self):\n if self.rows != self.cols:\n raise IndexError(\"Matrix is not square\")", "def _validate_row(self, row):\n\n # assume value.\n is_valid = True\n\n # test if each field in @row has the correct data type.\n tests = []\n for field, value in row.items():\n value_type, header_type = (type(value).__name__, \n self.required_headers[field].__name__)\n test = value_type == header_type\n if not test:\n err = \"Field '{}' not valid; expected '{}', got '{}'.\".format(field,\n header_type, value_type)\n self.logger.debug(err)\n tests.append(test)\n\n # if any test failed, set @is_valid to False.\n if False in tests:\n is_valid = False\n \n return is_valid", "def _validateRowCol(self, rows, cols, numRow, numCol, dvName):\n if rows is not None:\n rowArr = np.array(rows)\n if np.max(rowArr) > numRow:\n raise Error(\n \"Design variable \"\n + dvName\n + \" slice out of bounds. \"\n + \"Design var has \"\n + str(numRow)\n + \" rows and index up to \"\n + str(np.max(rowArr))\n + \" was specified: \"\n + str(rows)\n )\n if np.min(rowArr) < 1:\n raise Error(\n \"Design variable \"\n + dvName\n + \" slice out of bounds. \"\n + \"Row index less than 1 specified: \"\n + str(rows)\n )\n if len(rows) != len(set(rows)):\n # duplicates\n raise Error(\"Duplicate indices specified in the rows of design variable \" + dvName + \": \" + str(rows))\n\n if cols is not None:\n colArr = np.array(cols)\n if np.max(colArr) > numCol:\n raise Error(\n \"Design variable \"\n + dvName\n + \" slice out of bounds. \"\n + \"Design var has \"\n + str(numCol)\n + \" cols and index up to \"\n + str(np.max(colArr))\n + \" was specified: \"\n + str(cols)\n )\n if np.min(colArr) < 1:\n raise Error(\n \"Design variable \"\n + dvName\n + \" slice out of bounds. 
\"\n + \"col index less than 1 specified: \"\n + str(cols)\n )\n if len(cols) != len(set(cols)):\n # duplicates\n raise Error(\"Duplicate indices specified in the cols of design variable \" + dvName + \": \" + str(cols))", "def valid_col_tester(self, state):\n vert_state = self.cols(state)\n for line in vert_state:\n line_index = vert_state.index(line)\n vert_word = self.check_word(vert_state[line_index])\n if not(vert_word):\n return False\n return True", "def test_find_row_col_indices(self):\r\n obs = self.mc._find_row_col_indices(0)\r\n self.assertEqual(obs, (1, 0))\r\n\r\n obs = self.mc._find_row_col_indices(1)\r\n self.assertEqual(obs, (2, 0))\r\n\r\n obs = self.mc._find_row_col_indices(2)\r\n self.assertEqual(obs, (2, 1))\r\n\r\n obs = self.mc._find_row_col_indices(3)\r\n self.assertEqual(obs, (3, 0))\r\n\r\n obs = self.mc._find_row_col_indices(4)\r\n self.assertEqual(obs, (3, 1))\r\n\r\n obs = self.mc._find_row_col_indices(5)\r\n self.assertEqual(obs, (3, 2))\r\n\r\n obs = self.mc._find_row_col_indices(6)\r\n self.assertEqual(obs, (4, 0))\r\n\r\n self.assertRaises(IndexError, self.mc._find_row_col_indices, -1)", "def __check_col(self, x: int, y: int) -> bool:\n return not any([self.__maze[x + i, y] for i in (-1, 0, 1)])", "def check_bounds(self, row: int, col: int) -> bool:\n return 0 <= row < self.row and 0 <= col < self.col", "def is_valid(columns, row, col):\n # `row` is the current row; check against all previous rows\n for r in range(row):\n c = columns[r]\n # Check column\n if c == col:\n return False\n # Check diagonal\n if abs(c - col) == row - r:\n return False\n return True", "def test_row_from_columns_has_errors(self):\n errors_on_separate_row = True\n field_setup = None\n error_names = ['first', 'billing_address_1', 'billing_country_area']\n for as_type in ('p', 'ul', 'fieldset'):\n setup = self.setup_row_from_columns(as_type, field_setup, error_names, errors_on_separate_row)\n has_no_errors = setup[-1]\n for row in setup:\n if row == has_no_errors:\n self.assertEqual(len(row['expected']), 1)\n self.assertEqual(len(row['actual']), 1)\n else:\n self.assertGreater(len(row['expected']), 1)\n self.assertGreater(len(row['actual']), 1)\n self.assertEqual(row['expected'], row['actual'])", "def _validate_values(self, values):\n prev_len = -1\n i = j = -1\n if values is None or len(values) == 0:\n self.shape = 0, 0\n return\n for i, row in enumerate(values):\n if prev_len == -1:\n prev_len = len(row)\n if prev_len != len(row):\n raise ValueError(f\"Row {i} differs in length: {prev_len} != {len(row)}\")\n for j, val in enumerate(row):\n if type(val) not in (int, float, complex):\n raise ValueError(f\"[{i}, {j}]: {val} is of bad type ({type(val)})\")\n if val == 0:\n self.empty_loc = (i, j)\n if i == -1:\n self.shape = 0, 0\n else:\n self.shape = i + 1, j + 1", "def check_rows(self):\r\n for i in range(0, len(self.grid),3):\r\n if self.grid[i][-1] != ' ' and self.grid[i][-1] == self.grid[i+1][-1] and self.grid[i+1][-1] == self.grid[i+2][-1]:\r\n return (i, (self.grid[i], self.grid[i+2]))\r\n return (-1, None)", "def check_column_count(cls, line):\n\n # MAGIC n_cols = n_delim + 1 (no trailing delimiter)\n cols = line.count(cls.DELIMITER) + 1\n expected = 7 # MAGIC USAA convention, not all are populated though\n return cols == expected", "def bad_column_positions(self, x):\n return x.is_null()", "def check(self,a,x,y):\r\n return not self.exitsinrow(self.rows,x,a) and not self.existsincol(self.rows,y,a) and \\\r\n not self.exitsinblock(self.rows, x - x % 3, y - y % 3,a)", "def 
assert_check_bounds_column(self):\n value = logic.check_bounds_column(config.NR_COLS-1)\n self.assertTrue(value)\n value = logic.check_bounds_column(config.NR_COLS)\n self.assertFalse(value)\n value = logic.check_bounds_column(config.NR_COLS+1)\n self.assertFalse(value)", "def __check_row(self, x: int, y: int) -> bool:\n return not any([self.__maze[x, y + i] for i in (-1, 0, 1)])", "def test_row_from_columns_not_own_error_row(self):\n errors_on_separate_row = False\n field_setup = None\n error_names = None\n for as_type in ('p', 'ul', 'fieldset'):\n setup = self.setup_row_from_columns(as_type, field_setup, error_names, errors_on_separate_row)\n for row in setup:\n self.assertEqual(len(row['expected']), 1)\n self.assertEqual(len(row['actual']), 1)\n self.assertEqual(row['expected'], row['actual'])", "def check_column_values(self, values):\n none_keys = sorted(list(self._necessary_input_columns.intersection(set([elem for elem in self._columns if values[self.column_id[elem]] in [None, 'None']]))))\n if len(none_keys) > 0:\n raise Exception('missing_keys in ForcingOnMesh_DBManager add function parameter file_info:\\n%s\\n'%('\\n'.join([' - %s'%elem for elem in none_keys])))", "def test_row_from_columns_not_own_error_row_table(self):\n errors_on_separate_row = False\n field_setup = None\n error_names = None\n for as_type in ('p', 'ul', 'fieldset'):\n setup = self.setup_row_from_columns(as_type, field_setup, error_names, errors_on_separate_row)\n for row in setup:\n self.assertEqual(len(row['expected']), 1)\n self.assertEqual(len(row['actual']), 1)\n self.assertEqual(row['expected'], row['actual'])", "def check_column(data, functions, threshold):\n\n # Apply the functions to the column to return True/False values\n data = data.map(lambda x: any(f(x) for f in functions)).copy()\n # Identify the number of incorrect values\n data_invalid = data.sum()\n # Identify the number of values in the column\n data_total = data.count()\n\n if data_total == 0 or data_invalid == data_total:\n # If entire column is invalid or empty\n logging.error(\"Header supplied, but all data invalid or missing.\")\n return False\n else:\n # Find the percentage of invalid values\n data_perc = data_invalid / data_total\n\n # Check whether the % invalid values is above or below the threshold\n if data_perc > threshold:\n logging.error(f\"{data_perc*100 : .2f}% of values are blank or invalid\")\n return False\n\n return True", "def check_cols(self):\n if self.ad_tab is not None and 'date' not in self.ad_cols:\n raise DataException(\"\"\"date column not found in adServer table.\"\"\")\n if self.ad_tab is not None and 'impressions' not in self.ad_cols:\n raise DataException(\"\"\"impressions column not found in adServer table.\"\"\")\n if 'timestamp' not in self.log_cols and 'date' not in self.log_cols:\n raise DataException(\"\"\"Both timestamp and date column missing from {t}\nCannot do dailyQA\"\"\".format(t=self.log_tab))\n if self.configs['hourshift'] != 0 or 'date' not in self.log_cols:\n if 'timestamp' not in self.log_cols:\n raise DataException(\"\"\"Time shift requested \\\nbut no timestamp column in {t}.\"\"\".format(t=self.log_tab))\n else:\n check_timestamp(self.configs['schema'], self.log_tab)", "def test_002_range_columns(self):\n assert(len(\n self.range_transformer.fit_transform(\n self.data[self.range_col]\n ).columns\n ) == 1)", "def check_cols_methane(name):\n return True if name in ['SampleDay', 'SampleHour', 'Decimal Year',\n 'Peak Area 1', 'Peak Area 2', 'Run median', 'Daily Median'] else False", "def 
test_get_error_data_all_col_errors(self):\n field_setup = None\n error_names = None\n prepared_info = self.setup_error_data(field_setup, error_names)\n for row in prepared_info:\n self.assertEqual(row['expected'], row['actual'])", "def check_columns(self, win: list) -> bool:\r\n for row in range(self.size):\r\n column = [self.tags[x][row] for x in range(self.size)]\r\n for j in range(len(column) - len(win) + 1):\r\n if win == column[j:j+self.win_condition]:\r\n return True", "def validate_csv(filename, header, cols, rows):\n\n # open file\n data = pd.read_csv(filename, delimiter='|')\n\n # validate header\n assert header == '|'.join(list(data.columns.values))\n\n # validate column count\n assert data.shape[1] == cols\n\n # validate row count\n assert data.shape[0] == rows\n\n # return (header_result == column_result == row_result) is True", "def test_row_from_columns_has_errors_table(self):\n errors_on_separate_row = True\n field_setup = None\n error_names = ['first', 'billing_address_1', 'billing_country_area']\n for as_type in ('p', 'ul', 'fieldset'):\n setup = self.setup_row_from_columns(as_type, field_setup, error_names, errors_on_separate_row)\n has_no_errors = setup[-1]\n for row in setup:\n if row == has_no_errors:\n self.assertEqual(len(row['expected']), 1)\n self.assertEqual(len(row['actual']), 1)\n else:\n self.assertGreater(len(row['expected']), 1)\n self.assertGreater(len(row['actual']), 1)\n self.assertEqual(row['expected'], row['actual'])", "def check_meatadata_row(validated, input_validate_dict, row, idx):\n\n if row['RealCrystalName'].isspace() or row['RealCrystalName'] == 'nan':\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'RealCrystalName spaces or null', idx + 2)\n validated = False\n if row['crystal_name'].isspace() or row['RealCrystalName'] == 'nan':\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'Crystal name spaces or null', idx + 2)\n validated = False\n if row['RealCrystalName'] not in row['crystal_name']:\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'Crystal name does not contain RealCrystalName', idx + 2)\n validated = False\n if row['smiles'] == 'nan':\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'Smiles null', idx + 2)\n validated = False\n\n return validated, input_validate_dict", "def isEmptyColumn(self, j, rowns ):\n for i in range(0,rowns) :\n if not self.isEmpty(i,j):\n return False\n return True", "def is_valid(self,row,col) -> bool:\n if(row >=0 and col>=0 and row<self.row and col<self.col and self.array[row][col]==-1 ):\n return True\n return False", "def verify(self):\n for col in self.columns:\n if col not in self.table_obj.columns.keys():\n raise Exception('{} column not found in {}'.format(\n col, self.table_obj))", "def checkWithinBound(rowWithinBound,colWithinBound):\n if(rowWithinBound == 0 and colWithinBound == 0):\n return True\n else:\n return False", "def is_board_valid(bd):\n return is_rows_valid(bd) and is_cols_valid(bd) and is_sqrs_valid(bd)", "def checkcolumnstest(chosen_columns, chosen_df):\n if not all([item in chosen_columns for item in chosen_df.columns]):\n raise ValueError('Columns do not match')", "def main(self, *args, **kwds):\n # go through the columns\n for column, offset in self.columns.items():\n # verify they were all converted to integers\n assert type(offset) is int\n\n # all done\n return 0", "def _validate_from_plain(data: Sequence[Sequence],\n columns: Sequence[str],\n dtypes: Sequence[str],\n row_wise: bool):\n\n if row_wise:\n # assert equal number of elements across rows\n 
row_lenghts = {len(row) for row in data}\n if len(row_lenghts) > 1:\n raise ValueError(\"Input data has varying number of values per \"\n \"row. Please check provided input data\")\n\n # assert equal number of columns and elements per row\n row_lenghts.add(len(columns))\n if len(row_lenghts) > 1:\n raise ValueError(\n \"Number of columns has to equal the number of \"\n \"values per row. Please check column names and \"\n \"provided input data.\")\n\n # assert equal number of dtypes and elements per row\n row_lenghts.add(len(dtypes))\n if len(row_lenghts) > 1:\n raise ValueError(\"Number of dtypes has to equal the number of \"\n \"values per row. Please check dtypes and \"\n \"provided input data.\")\n\n else:\n # assert equal number of elements across columns\n col_lengths = {len(col) for col in data}\n if len(col_lengths) > 1:\n raise ValueError(\"Input data has varying number of values per \"\n \"columns. Please check provided input data\")\n\n # assert equal number of columns in data, column names and dtypes\n col_count = len(columns)\n if col_count != len(data):\n raise ValueError(\"Input data and column names have different \"\n \"amount of columns. Please check provided \"\n \"input data\")\n\n if col_count != len(dtypes):\n raise ValueError(\"Input data and dtypes have different \"\n \"amount of columns. Please check provided \"\n \"input data\")", "def get_cols_dummy():", "def test_overall_report_columns():\n assert (len(overall_data['columns']) == 31)", "def checkMatrix(i,j):\n # print(i,j)\n counter = Counter([grid[di][dj] for di in range(i,i+3) for dj in range(j,j+3)])\n for k in range(1,10):\n if counter[k] != 1:\n return False\n\n rows_sum = [sum(grid[k][j:j+3]) for k in range(i,i+3)]\n # print(rows_sum)\n if not all(m == 15 for m in rows_sum):\n return False\n cols_sum = [sum(grid[q][k] for q in range(i,i+3)) for k in range(j,j+3)]\n # print(cols_sum)\n if not all(m == 15 for m in cols_sum):\n return False\n dgl_sum = sum(grid[i+k][j+k] for k in range(3))\n anti_dgl_sum = sum(grid[i+k][j+2-k] for k in range(3))\n # print(dgl_sum, anti_dgl_sum)\n if dgl_sum != 15 or anti_dgl_sum != 15:\n return False\n return True", "def onlyrow(self):\n return self.y <= 1", "def validate_dataset(columns, rows):\n # Ensure that all column identifier are zero or greater, unique, and smaller\n # than the column counter (if given)\n col_ids = set()\n for col in columns:\n if col.identifier < 0:\n raise ValueError('negative column identifier \\'' + str(col.identifier) + '\\'')\n elif col.identifier in col_ids:\n raise ValueError('duplicate column identifier \\'' + str(col.identifier) + '\\'')\n col_ids.add(col.identifier)\n # Ensure that all row identifier are zero or greater, unique, smaller than\n # the row counter (if given), and contain exactly one value for each column\n row_ids = set()\n for row in rows:\n if len(row.values) != len(columns):\n raise ValueError('schema violation for row \\'' + str(row.identifier) + '\\'')\n elif row.identifier < 0:\n raise ValueError('negative row identifier \\'' + str(row.identifier) + '\\'')\n elif row.identifier in row_ids:\n raise ValueError('duplicate row identifier \\'' + str(row.identifier) + '\\'')\n row_ids.add(row.identifier)\n return max(col_ids) if len(col_ids) > 0 else -1, max(row_ids) if len(row_ids) > 0 else -1", "def test_num_columns(self):\n pass", "def TestInput( data, options ) :\n columns = data.columns\n return all( x in columns for x in options)", "def is_rows_valid(bd):\n for row in rows:\n seen = []\n for num in nums:\n if bd[row[num]] 
== \" \":\n continue\n elif bd[row[num]] not in seen:\n seen += [bd[row[num]]]\n else:\n return False\n else:\n continue\n return True", "def conformability(self, other):\r\n if self.columns == other.rows:\r\n return True\r\n else:\r\n return False", "def test_too_many_cols1():\n text = dedent(\n \"\"\"\n A B C\n 1 2 3\n 4 5 6\n 7 8 9 10\n 11 12 13\n \"\"\"\n )\n with pytest.raises(InconsistentTableError) as e:\n FastBasic().read(text)\n assert (\n \"Number of header columns (3) inconsistent with data columns in data line 2\"\n in str(e.value)\n )", "def __checkcolumns(self, lista: List[str]) -> True:\r\n\r\n if isinstance(lista, list) is False:\r\n raise TypeError(f\"{lista} has to be a list.\")\r\n if len(lista) != 10:\r\n raise ValueError(f\"{lista} must have 10 columns\")\r\n\r\n errorlista = []\r\n\r\n # Regarding 'self.tiposDisponiveis',\r\n # Layer and Marked happens on the same column.\r\n # if there is 'layer', 'marked' won't show up, and viceversa.\r\n # Therefore 'self.tiposDisponiveis' is a list with 11 elements. While 'lista' is a list with 10 elements.\r\n\r\n for _ in lista:\r\n # searching for 'Layer'\r\n if self.tiposDisponiveis[0].lower() == _.lower():\r\n break\r\n else:\r\n # if 'Layer' wasn't found, searching for 'Marked'\r\n for _ in lista:\r\n if self.tiposDisponiveis[1].lower() == _.lower():\r\n break\r\n else:\r\n # If none of the two are present on the line, add to the error list\r\n errorlista.append(\"Layer Or Marked\")\r\n \r\n # repeat the search for all the remaining required values\"\"\"\r\n for _ in range(2, len(self.tiposDisponiveis)-1):\r\n for x in lista:\r\n if x.lower() == self.tiposDisponiveis[_].lower():\r\n break\r\n else:\r\n # Didn't find this column in the list\r\n errorlista.append(f\"{self.tiposDisponiveis[_]}\")\r\n\r\n # Raising the errors, if any occurred.\r\n if len(errorlista) > 0:\r\n raise ValueError(f\"{errorlista} <- These columns are missing from format.\")\r\n\r\n # Last column has to be 'Text'\r\n if lista[9].lower() != self.tiposDisponiveis[10].lower():\r\n raise ValueError(f\"{lista[9]} last element has to be 'Text'.\")\r\n \r\n return True", "def valid(self, col, rank):\n return rank <= 8 and rank > 0 and self.getColIdx(col) < 8 and self.getColIdx(col) >= 0", "def valid(self, col, rank):\n return rank <= 8 and rank > 0 and self.getColIdx(col) < 8 and self.getColIdx(col) >= 0", "def check_columns():\n global ongoing_game\n column_1 = board[0] == board[3] == board[6] != \"*\"\n column_2 = board[1] == board[4] == board[7] != \"*\"\n column_3 = board[2] == board[5] == board[8] != \"*\"\n if column_1 or column_2 or column_3:\n ongoing_game = False\n if column_1:\n return board[0]\n elif column_2:\n return board[1]\n elif column_3:\n return board[2]\n else:\n return None", "def test_get_error_data_table_all_col_errors(self):\n field_setup = None\n error_names = None\n prepared_info = self.setup_error_data(field_setup, error_names, True)\n for row in prepared_info:\n self.assertEqual(row['expected'], row['actual'])", "def verify_vertex_values(self):\n for (line, row) in [(ln, rw) for ln in range(9) for rw in range(9)]:\n if self.grid[line][row] not in range(1, 10) and self.grid[line][row] is not None:\n raise VertexValueError()", "def check_rows():\n global game_still_going\n # Check if any of the rows have all the same value.\n row1 = board[0] == board[1] == board[2] != '_'\n row2 = board[3] == board[4] == board[5] != '_'\n row3 = board[6] == board[7] == board[8] != '_'\n # If any row does have a match, then game still going to 
False.\n if row1 or row2 or row3:\n game_still_going = False\n # Return winner 'X' or 'O'.\n if row1:\n return board[0]\n if row2:\n return board[3]\n if row3:\n return board[6]", "def valid_coverage_cell(self, row, column):\n\n if (row < self.cov_grid.shape[0] and\n row >= 0) and \\\n (column < self.cov_grid.shape[1] and\n column >= 0):\n return True\n else:\n return False", "def _validate_indexes(self, row, col):\n if min(row, col) < 0 or max(row, col) >= self._n:\n raise IndexError(\n \"Incorrect position (%d, %d) in grid of size %d\" % (\n row, col, self._n\n )\n )", "def check_rows(self):\n\t\tfor i in range(len(self.board)):\n\t\t\tpts = 0\n\t\t\tfor j in range(len(self.board[i])):\n\t\t\t\tif self.board[i][j] == self.marker:\n\t\t\t\t\tpts+=1\n\t\t\tif pts == 3:\n\t\t\t\tprint('YOU WON')\n\t\t\t\treturn True", "def _validate_plaincolumns(self):\n\n # assert tuples for plaincolumns and plaincolumns to be PlainColumn\n if not isinstance(self.plaincolumns, tuple):\n raise ValueError(\"PlainFrame was instantiated incorrectly. \"\n \"`plaincolumns` needs to be of type `tuple`. \"\n \"However, {} was encountered. Please use \"\n \"`PlainFrame.from_plain` instead for convenient \"\n \"instantiation and proper type casts.\"\n .format(type(self.plaincolumns)))\n\n not_plaincolumn = [type(column)\n for column in self.plaincolumns\n if not isinstance(column, PlainColumn)]\n\n if not_plaincolumn:\n raise ValueError(\"PlainFrame was instantiated incorrectly. \"\n \"Elements of `plaincolumns` needs to be of type \"\n \"`PlainColumn`. However, {} was encountered. \"\n \"Please use `PlainFrame.from_plain` instead for \"\n \"convenient instantiation and proper type casts.\"\n .format(not_plaincolumn))\n\n # assert equal number of values per column\n row_lenghts = {len(column.values) for column in self.plaincolumns}\n if len(row_lenghts) > 1:\n raise ValueError(\"Input data has varying number of values per \"\n \"column. Please check provided input data.\")\n\n # assert unique column names\n duplicates = {x for x in self.columns if self.columns.count(x) > 1}\n if duplicates:\n raise ValueError(\"Duplicated column names encountered: {}. 
\"\n \"Please use unique column names.\"\n .format(duplicates))", "def test_sanity_check (self):\n X, Y = self.dm.get_data(std=True, lag_indicator=True)\n\n # Ensure number of rows between what we expect.\n row_bound = (800, 1000)\n actual_rows = X.shape[0]\n msg = 'Number of rows not within expected bounds.'\n self.assertTrue(row_bound[0] < actual_rows < row_bound[1], msg)\n\n msg = 'X and Y have different number of rows.'\n self.assertEqual(X.shape[0], Y.shape[0], msg)\n\n # Ensure X columns match.\n expected_x_cols = ['SP500', 'ltc_px_std', 'xrp_px_std', 'xlm_px_std',\n 'eth_px_std', 'btc_px_std', 'ltc_volume_std',\n 'xrp_volume_std', 'xlm_volume_std', 'eth_volume_std',\n 'btc_volume_std', 'lagged_others']\n actual_x_cols = X.columns.tolist()\n msg = 'Number of X columns different than expected.'\n self.assertEqual(len(actual_x_cols), len(expected_x_cols), msg)\n\n for col in expected_x_cols:\n msg = 'Expected column not found: {}'.format(col)\n self.assertTrue(col in actual_x_cols, msg)", "def test_arguments(self):\n\n h.test_function_arguments(\n func=BaseTransformer.columns_check, expected_arguments=[\"self\", \"X\"]\n )", "def checkvalid(self,borad,row,col,n):\n # check the above column has 'Q'\n i=0\n while i!=row:\n if borad[i][col]=='Q':\n return False\n i+=1\n # check the left-top 135 and right-top 45\n i,j=row-1,col-1\n while i>=0 and j>=0:\n if borad[i][j]=='Q':\n return False\n i-=1\n j-=1\n \n i,j=row-1,col+1\n while i>=0 and j<n:\n if borad[i][j]=='Q':\n return False\n i-=1\n j+=1\n \n return True", "def row1_invariant(self, target_col):\r\n # replace with your code\r\n conditions = 0\r\n current = self._grid[1][target_col] == 0\r\n if current:\r\n conditions +=1\r\n else:\r\n # print 'Tile ZERO is not at (0, %s) position' %(target_col)\r\n return False\r\n \r\n below_row = 1 + 1\r\n for _ in range(1, self._height - below_row):\r\n below_row += 1\r\n for ind in range(len(self._grid[below_row])):\r\n if self.current_position(below_row, ind) != (below_row, ind):\r\n # print 'Some tile in the lower row does not in correct place in row1_invariant' \r\n return False\r\n conditions += 1\r\n if conditions == 2:\r\n # print 'All conditions are correct!'\r\n return True", "def test_single(self):\n df = self.df.head(1).copy()\n out = get_full_column(df.values)\n self.assertTrue(out == 0)", "def isLegal(self):\n # checks for same values in rows\n for n in range(9):\n rows = set()\n for m in range(9):\n if self.puzzle[n][m] != 0:\n size = len(rows)\n rows.add(self.puzzle[n][m])\n if size == len(rows):\n return False\n\n #checks for same values in columns\n for m in range(9):\n cols = set()\n for n in range(9):\n if self.puzzle[n][m] != 0:\n size = len(cols)\n cols.add(self.puzzle[n][m])\n if size == len(cols):\n return False\n\n #checks for same values in sections\n sections = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]\n for r in sections:\n for c in sections:\n sects = set()\n for n in r:\n for m in c:\n if self.puzzle[n][m] != 0:\n size = len(sects)\n sects.add(self.puzzle[n][m])\n if size == len(sects):\n return False\n return True", "def valid_guess(self, row, col):\n # if row nor col is at an edge space, returns False\n if not isinstance(row, int) or not isinstance(col, int):\n return False\n # ensures no corner spaces have been selected\n if row < 1 or row > 8:\n return False\n if col < 1 or col > 8:\n return False\n return True", "def verify(self):\n for col in self._columns:\n if col not in self._table_obj.columns.keys():\n raise GaiaException('{} column not found in {}'.format(\n col, 
self._table_obj))", "def check_rows():\n global ongoing_game\n row_1 = board[0] == board[1] == board[2] != \"*\"\n row_2 = board[3] == board[4] == board[5] != \"*\"\n row_3 = board[6] == board[7] == board[8] != \"*\"\n if row_1 or row_2 or row_3:\n ongoing_game = False\n if row_1:\n return board[0]\n elif row_2:\n return board[3]\n elif row_3:\n return board[6]\n else:\n return None", "def test_no_column(self):\n\n self.assertRaises(ValueError, self.table.where, 'True')", "def TestColumn(SudokuGrid):\r\n for i in range(9):\r\n for j in range(8):\r\n for k in range(j+1,9):\r\n if SudokuGrid[j][i]==SudokuGrid[k][i]:\r\n return False\r\n return True", "def _validate_cols(cols):\n\n\tif cols is not None and len(cols) < 2:\n\t\traise ValueError('too few features')", "def filter_row(col, rw):\n return rw == row", "def test_has_incorrect_integer(row):\n assert not sudoku.no_wrong_integers(row)", "def check_col(sudoku):\r\n for col in range(9):\r\n for row in range(8):\r\n test = sudoku[row][col]\r\n for i in range(row+1,9):\r\n if sudoku[i][col] == test:\r\n return True #returns True is there is more than two of the same numbers in a column\r", "def row0_invariant(self, target_col):\n # replace with your code\n if self.get_number(0, target_col) != 0:\n return False\n current = 0\n for row in range(2, self.get_height()):\n if target_col == self.get_width() - 1:\n current = self._grid[row][0]\n else:\n current = self._grid[row - 1][-1] + 1\n column = self._grid[row]\n for grid in column:\n if grid != current:\n print 'Error 4'\n return False\n current += 1\n current = self._grid[1][target_col]\n for grid in self._grid[1][target_col:]:\n if grid != current:\n print 'Error 5'\n return False\n current += 1\n return True", "def validCol(content,start,schedule):\n\t\t\tcur_id = content[1].id\n\t\t\t#print \"cur_id,length,start\",cur_id,content[1].length,start\n\t\t\tflag = 0\n\t\t\tfor i in range(content[1].length):\n\t\t\t\tfor j in range(len(schedule.w)):\n\t\t\t\t\t#print start,i,content[1]\n\t\t\t\t\tif schedule.w[j][start+i]!=None and \\\n\t\t\t\t\t\tschedule.w[j][start+i][1].id == cur_id:\n\t\t\t\t\t\tflag += 1\n\t\t\tif flag != content[1].length:\n\t\t\t\t#print \"col not valid\",flag,content[1].length,cur_id\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\treturn True", "def checkSafe(Board, rows, column):\n for x in range(rows):\n if (Board[x] == column or\n Board[x] + rows - x == column or\n Board[x] + x - rows == column):\n return False\n return True", "def _pre_check(self) -> bool:\n if self._fuse_row:\n rows = (\n self._tiling.cells_in_row(self._row_idx),\n self._tiling.cells_in_row(self._row_idx + 1),\n )\n else:\n rows = (\n self._tiling.cells_in_col(self._col_idx),\n self._tiling.cells_in_col(self._col_idx + 1),\n )\n has_a_long_row = any(len(row) > 1 for row in rows)\n if has_a_long_row:\n return False\n first_cell = next(iter(rows[0]))\n second_cell = next(iter(rows[1]))\n cells_are_adjacent = (\n first_cell[0] == second_cell[0] or first_cell[1] == second_cell[1]\n )\n if not cells_are_adjacent:\n return False\n same_basis = (\n self._tiling.cell_basis()[first_cell][0]\n == self._tiling.cell_basis()[second_cell][0]\n )\n if not same_basis:\n return False\n self._first_cell = first_cell\n self._second_cell = second_cell\n return True" ]
[ "0.6793047", "0.6788679", "0.6685578", "0.66567594", "0.65624297", "0.6553434", "0.6511974", "0.6502489", "0.6456458", "0.6430093", "0.6386662", "0.63792455", "0.63757235", "0.63635373", "0.63515013", "0.6350334", "0.6341316", "0.6338641", "0.63122106", "0.6300438", "0.6298138", "0.62792474", "0.62524235", "0.6237514", "0.62236387", "0.62167513", "0.61866903", "0.6185736", "0.6184861", "0.6176555", "0.6164317", "0.6161782", "0.6159704", "0.6159306", "0.6156147", "0.61489403", "0.6142758", "0.6134939", "0.61152494", "0.60974306", "0.60915697", "0.60804707", "0.60646117", "0.6060092", "0.60481715", "0.6028679", "0.6021481", "0.60069704", "0.6005212", "0.60043633", "0.59915036", "0.59809965", "0.5975733", "0.596448", "0.59591556", "0.59520936", "0.5951777", "0.5943778", "0.59401983", "0.593523", "0.5921306", "0.5919335", "0.5917422", "0.59171426", "0.59111273", "0.590547", "0.59039325", "0.59023035", "0.59014", "0.5885982", "0.58853793", "0.5884059", "0.5877024", "0.5877024", "0.58700395", "0.5866905", "0.586319", "0.5862567", "0.5861917", "0.58601457", "0.58585876", "0.58482647", "0.58468646", "0.58285135", "0.58225584", "0.58162314", "0.5814931", "0.58144724", "0.57949317", "0.57940316", "0.57803184", "0.57752454", "0.57606685", "0.5760419", "0.5750049", "0.5749341", "0.574875", "0.5747818", "0.5738447", "0.5719512", "0.5718261" ]
0.0
-1
Split an image into a specified number of tiles.
def slice(
    filename,
    number_tiles=None,
    col=None,
    row=None,
    save=True,
    DecompressionBombWarning=True,
):
    if DecompressionBombWarning is False:
        Image.MAX_IMAGE_PIXELS = None

    im = Image.open(filename)
    im_w, im_h = im.size

    columns = 0
    rows = 0
    if number_tiles:
        validate_image(im, number_tiles)
        columns, rows = calc_columns_rows(number_tiles)
    else:
        validate_image_col_row(im, col, row)
        columns = col
        rows = row

    tile_w, tile_h = int(floor(im_w / columns)), int(floor(im_h / rows))

    tiles = []
    number = 1
    for pos_y in range(0, im_h - rows, tile_h):  # -rows for rounding error.
        for pos_x in range(0, im_w - columns, tile_w):  # as above.
            area = (pos_x, pos_y, pos_x + tile_w, pos_y + tile_h)
            image = im.crop(area)
            position = (int(floor(pos_x / tile_w)) + 1, int(floor(pos_y / tile_h)) + 1)
            coords = (pos_x, pos_y)
            tile = Tile(image, number, position, coords)
            tiles.append(tile)
            number += 1
    if save:
        save_tiles(
            tiles, prefix=get_basename(filename), directory=os.path.dirname(filename)
        )
    return tuple(tiles)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _split_image_into_tiles(\n self, image: np.ndarray\n ) -> t.Sequence[t.Tuple[t.Tuple[t.Any, ...], np.ndarray]]:\n h, w, c = image.shape\n tile_height = (\n math.ceil(h / (self._n_tiles // 2 - 1))\n if self._n_tiles > 4\n else math.ceil(h / (self._n_tiles // 2))\n )\n tile_width = math.ceil(w / (self._n_tiles // 2))\n tiles = [] # type: ignore\n for i in range(0, h, tile_height):\n for j in range(0, w, tile_width):\n tiles.append(\n (\n (i, i + tile_height, j, j + tile_width),\n image[i : i + tile_height, j : j + tile_width, :],\n )\n )\n return tiles", "def split_image_into_number_of_tiles(\n arr: Image, x_ntiles: int, y_ntiles: int, overlap: int\n):\n img_width, img_height = arr.shape[-1], arr.shape[-2]\n tile_w = img_width // x_ntiles\n tile_h = img_height // y_ntiles\n return split_image_into_tiles_of_size(arr, tile_w, tile_h, overlap)", "def chunks(img, n):\n shape = img.shape\n imgs = []\n\n nx = int(n * (shape[1]/(shape[0] + shape[1])))\n ny = n - nx\n\n x = int(shape[0]/n)\n y = int(shape[0]/n)\n\n for i in range(nx - 1):\n line = []\n for j in range(ny - 1):\n line.append(img[y*j: y*(j+1), x*i: x*(i+1), ::])\n imgs.append(line)\n return imgs", "def split_image_into_tiles_of_size(arr: Image, tile_w: int, tile_h: int, overlap: int):\n x_axis = -1\n y_axis = -2\n arr_width, arr_height = arr.shape[x_axis], arr.shape[y_axis]\n\n x_ntiles = (\n arr_width // tile_w if arr_width % tile_w == 0 else (arr_width // tile_w) + 1\n )\n y_ntiles = (\n arr_height // tile_h if arr_height % tile_h == 0 else (arr_height // tile_h) + 1\n )\n\n tiles = []\n\n # row\n for i in range(0, y_ntiles):\n # height of this tile\n ver_f = tile_h * i\n ver_t = ver_f + tile_h\n\n # col\n for j in range(0, x_ntiles):\n # width of this tile\n hor_f = tile_w * j\n hor_t = hor_f + tile_w\n\n tile = get_tile(arr, hor_f, hor_t, ver_f, ver_t, overlap)\n\n tiles.append(tile)\n tile_shape = [tile_h, tile_w]\n ntiles = dict(x=x_ntiles, y=y_ntiles)\n padding = dict(left=0, right=0, top=0, bottom=0)\n if arr_width % tile_w == 0:\n padding[\"right\"] = 0\n else:\n padding[\"right\"] = tile_w - (arr_width % tile_w)\n if arr_height % tile_h == 0:\n padding[\"bottom\"] = 0\n else:\n padding[\"bottom\"] = tile_h - (arr_height % tile_h)\n info = dict(tile_shape=tile_shape, ntiles=ntiles, overlap=overlap, padding=padding)\n return tiles, info", "def slice_image(image, tile_size):\n height = image.shape[0]\n width = image.shape[1]\n assert height > tile_size and width > tile_size\n\n num_tiles_x, num_tiles_y = number_of_patches(width, height, tile_size)\n width, height = output_image_size(num_tiles_x, num_tiles_y, tile_size)\n\n # Crop image to new size\n image = image[:height, :width]\n\n tiles = np.zeros((num_tiles_y, num_tiles_x, tile_size, tile_size, 3))\n for i, ty in enumerate(range(0, height, tile_size)):\n for j, tx in enumerate(range(0, width, tile_size)):\n tiles[i, j] = image[ty : ty + tile_size, tx : tx + tile_size]\n\n return tiles", "def split_into_tiles(self, x: torch.Tensor):\n tiles, self._coords, self._overlap = self._get_tiles_and_coords(x)\n self._num_tiles = tiles.shape[0]\n return tiles", "def split_tileset(self, tileset):\n\n tiles = self.tiles\n firstgid = tileset.firstgid\n tilewidth = self.tilewidth\n tileheight = self.tileheight\n margin = tileset.margin\n\n # carga la imagen del tileset y obtiene sus dimensiones\n image = pygame.image.load(tileset.image_path).convert_alpha()\n image_width, image_height = image.get_size()\n\n # calcula el número de columnas\n cols = image_width // tilewidth\n\n # 
calcula el espaciamiento entre cada tile en cada eje\n tx = tilewidth + tileset.spacing\n ty = tileheight + tileset.spacing\n\n # calcula la máxima distancia a iterar en cada eje\n max_y = image_height - tileheight + 1\n max_x = image_width - tilewidth + 1\n\n # divide una imagen en tiles\n for row, y in enumerate(xrange(margin, max_y, ty)):\n for col, x in enumerate(xrange(margin, max_x, tx)):\n tile = image.subsurface((x, y, tilewidth, tileheight))\n tiles[firstgid + row * cols + col] = tile", "def split_to_tiles(array: np.ndarray, tile_height: int, tile_width: int) -> np.ndarray:\n arr_height, arr_width, *dimensions = array.shape\n nchannels = dimensions[0] if dimensions else 1\n new_shape = get_shape_for_tile_split(\n arr_height, arr_width, nchannels, tile_height, tile_width\n )\n return array.reshape(new_shape).swapaxes(1, 2)", "def make_tiles(input_path, save_path, dimension):\n for filename in os.listdir(input_path):\n if filename.endswith(\".png\"):\n image_path = input_path + filename\n\n width, height = Image.open(image_path).size\n\n # Ensures image is square.\n assert width == height\n # Ensures the image can be cut into the desired dimensions.\n assert width % dimension == 0\n n_tiles = (width / dimension) ** 2\n\n tiles = image_slicer.slice(image_path, n_tiles, save=False)\n image_slicer.save_tiles(\n tiles, directory=save_path, prefix=filename[0:2], format=\"png\"\n )", "def make_tiles_limits(im, n_splits, margin=0):\n \n if n_splits == 1:\n return [0, im.shape[1], 0, im.shape[0]]\n # number of splits per axis\n ax_splits = int(np.log2(n_splits))\n x_segments = split_range(im.shape[1], ax_splits)\n y_segments = split_range(im.shape[0], ax_splits)\n \n if margin > 0:\n x_segments = extend_indices(x_segments, margin=margin)\n y_segments = extend_indices(y_segments, margin=margin)\n \n # make combinations of [xmin, xmax, ymin, ymax] indices of tiles\n tiles_indices = []\n for xlim in x_segments:\n for ylim in y_segments:\n tiles_indices.append(xlim + ylim)\n return tiles_indices", "def splitImages(self):\n imgs = self.img_list\n frames = self.frame_number.value()\n grps = []\n for i in range(0, len(imgs), frames):\n grps.append(imgs[i:i + frames])\n\n return grps", "def montage(images, w_sub, h_sub, step):\n target = Image.new('RGB', (w_sub*step, h_sub*step))\n left = 0\n right = w_sub\n for i in range(len(images)):\n top=(i//step)*h_sub\n target.paste(images[i], (left, top, right, top+h_sub))\n if(i//step < (i+1)//step):#Check if this row is done\n left = 0#Reset the position in a row\n right = w_sub\n else: #Next picture\n left += w_sub\n right += w_sub\n quality_value = 100\n return target", "def slice_to_tiles(self, tile_raw_size=None, show_info=\"\"):\n if not tile_raw_size: tile_raw_size = self.tile_raw_size\n tile_raw_w,tile_raw_h = tile_raw_size\n tile_w,tile_h = round(tile_raw_w),round(tile_raw_h)\n\n if show_info:\n print(f\" ==Slicing {show_info} Tiles==\")\n print(f' Tile raw size: {tile_raw_size[0]} x {tile_raw_size[1]} px\\n')\n\n #process into list of image objects\n tiles = []\n true_x, true_y = (0,0)\n with Image.open(self.path) as img_obj:\n w,h = img_obj.size\n for row in range(0,h-tile_h,tile_h):\n tiles_row = []\n y = round(true_y)\n for col in range(0,w-tile_w,tile_w):\n x = round(true_x)\n im_crop = img_obj.crop((x,y,x+tile_w,y+tile_h))\n tiles_row.append(im_crop)\n true_x += tile_raw_w\n tiles.append(tiles_row)\n true_y += tile_raw_h\n true_x = 0\n\n return tiles", "def split_tiles(module_data):\n raise NotImplementedError", "def split(self):\n 
sub_images = []\n\n for region in regionprops(self.cells):\n minr, minc, maxr, maxc = region.bbox\n sub_image = self.image_raw[max(0, minr - 10):maxr, max(0, minc - 10):maxc, :]\n\n sub_images.append(FQimage(data=sub_image))\n\n return sub_images", "def image_tiles(bqsession, image_service_url, tile_size=64):\n dims = bqsession.fetchxml(image_service_url, dims='')\n x = int(dims.xpath('//tag[@name=\"image_num_x\"]')[0].attrib[ 'value'])\n y = int(dims.xpath('//tag[@name=\"image_num_y\"]')[0].attrib[ 'value'])\n \n for ix in range(int(x/tile_size)-1):\n for iy in range(int(y/tile_size)-1):\n yield bqsession.c.prepare_url(image_service_url, tile='0,%s,%s,%s' % (str(ix), str(iy), str(tile_size)))", "def image_to_tiles(img, tile_size):\n padding_argument = [(0,0),(0,0),(0,0)]\n for input_dim in [0,1]:\n padding_argument[input_dim] = (0, (tile_size - img.shape[input_dim]) % tile_size)\n img = np.pad(img, padding_argument, mode='constant')\n tiles = img.reshape((img.shape[0]//tile_size, \n tile_size,\n img.shape[1]//tile_size,\n tile_size,\n img.shape[2]\n )).swapaxes(1,2)\n return tiles", "def split_images(x, y=None, size=(128, 128), num_part=4):\n x_patches = image.PatchExtractor(patch_size=size, max_patches=num_part, random_state=0)\n x_imgs = x_patches.transform(x)\n # Check if number of channels is the same for grayscale\n if x.shape[-1] != x_imgs.shape[-1]:\n x_imgs = x_imgs[:, :, :, np.newaxis]\n\n if not y is None:\n y_patches = image.PatchExtractor(patch_size=size, max_patches=num_part, random_state=0)\n y_imgs = y_patches.transform(y)\n\n # Check if number of channels is the same for grayscale\n if y.shape[-1] != y_imgs.shape[-1]:\n y_imgs = y_imgs[:, :, :, np.newaxis]\n\n return x_imgs, y_imgs\n\n return x_imgs", "def _split_heads(self, x, is_picture):\n if is_picture is False:\n if len(x.shape) != 3:\n raise ValueError(\"x must have rank 3\")\n shape = x.shape\n return x.reshape(shape[0], shape[1], self.num_heads, shape[2]//self.num_heads).permute(0, 2, 1, 3).contiguous()\n else:\n if len(x.shape) != 5:\n raise ValueError(\"x must have rank 5\")\n shape = x.shape\n return x.reshape(shape[0], shape[1], shape[2], shape[3], self.num_heads, shape[4]//self.num_heads).permute(0, 4, 1, 2, 3, 5).contiguous()", "def lap_split_n(img, n):\n levels = []\n\n print(\"inside lap_split_n function \")\n\n for i in range(n):\n img, hi = lap_split(img)\n levels.append(hi)\n levels.append(img)\n return levels[::-1]", "def split_image_with_bboxes(bboxes, image, tiles=4):\n\n if tiles == 0:\n return {(0, 0): {\"image\": image, \"bboxes\": bboxes}}\n assert tiles % 2 == 0, \"Error in splitting images. 
Uneven number of images requested.\"\n\n split = tiles / 2\n\n height, width, *_ = image.shape\n\n new_height = height / split\n new_width = width / split\n\n tiles = {}\n\n tile_height = new_height\n\n for row in range(int(split)):\n tile_width = new_width\n for col in range(int(split)):\n\n # Create image with true values on tile\n canvas = np.zeros_like(image)\n tile_start = (int(tile_height-new_height), int(tile_width-new_width))\n tile_end = (int(tile_height), int(tile_width))\n canvas[tile_start[0]:tile_end[0], tile_start[1]:tile_end[1]] = 1\n\n new_bboxes = []\n for bbox in bboxes:\n\n xmin, ymin, xmax, ymax = bbox\n\n # Overlap of image tile and bbox\n bbox_image = np.zeros_like(image)\n bbox_image[ymin:ymax, xmin:xmax] = 1\n\n overlap = np.logical_and(canvas, bbox_image)\n\n if np.sum(overlap) < 1:\n continue\n\n overlap_index = np.argwhere(overlap)\n\n overlap_xmin, overlap_ymin = overlap_index[0][1], overlap_index[0][0]\n overlap_xmax, overlap_ymax = overlap_index[-1][1]+1, overlap_index[-1][0]+1\n\n new_xmin = overlap_xmin - col * new_width\n new_ymin = overlap_ymin - row * new_height\n new_xmax = overlap_xmax - col * new_width\n new_ymax = overlap_ymax - row * new_height\n\n new_bbox = (new_xmin, new_ymin, new_xmax, new_ymax)\n\n new_bboxes.append(new_bbox)\n\n cropped_image = image[tile_start[0]:tile_end[0], tile_start[1]:tile_end[1]]\n tiles[(row, col)] = {\"image\": cropped_image, \"bboxes\": new_bboxes}\n\n tile_width = tile_width + new_width\n tile_height = tile_height + new_height\n\n return tiles", "def tile_images(img, img_size=32, rows=4, cols=4, spacing=1):\n images = np.ones([3, rows * (img_size + spacing) - spacing, cols * (img_size + spacing)], dtype=np.float32)\n coords = [(i, j) for i in range(rows) for j in range(cols)]\n\n for (i, j), image in zip(coords, img):\n x = i * (img_size + spacing)\n y = j * (img_size + spacing)\n images[:, x: x+img_size, y:y+img_size] = image\n\n return images", "def img_split(img):\n\tbands = img.shape[2]\n\tif bands is 1:\n\t\treturn \"Image already is 1D. 
Why would you split it?\"\n\n\tband1 = img[:, :, 0]\n\tband2 = img[:, :, 1]\n\tband3 = img[:, :, 2]\n\tif bands is 4:\n\t\tband4 = img[:, :, 4]\n\t\treturn(band1, band2, band3, band4)\n\treturn(band1, band2, band3)", "def tiffmatrixSplit(kv):\n filename, tiffmat = kv[0], kv[1]\n # Each image is 500x500\n kv_list = []\n if len(tiffmat) == 2500:\n num_matrices = 5**2\n split_size = 5\n elif len(tiffmat) == 5000:\n num_matrices = 10**2\n split_size = 10\n else:\n raise ValueError(\"TIFF file has dimensions other than 2500x2500 or 5000x5000\")\n all_matrices = []\n file_names = [filename + '-' + str(i) for i in np.arange(num_matrices)]\n big_rows = np.vsplit(tiffmat, split_size)\n for row in big_rows:\n all_matrices += np.hsplit(row, 5)\n return list(zip(file_names,all_matrices))", "def split_into_chunks(x, n):\n csize = int(np.ceil(len(x) / n))\n out = list()\n \n i = 0\n while i * csize < len(x):\n out.append(x[(i * csize):(i * csize + csize)])\n i += 1\n\n return out", "def split(container, count):\n return [container[_i::count] for _i in range(count)]", "def split_image(image_name):\n #pil_image = Image.fromarray(image_name)\n red, green, blue = img.split()\n\n return red, green, blue", "def tile_images(image_stack):\n assert len(image_stack.shape) == 4\n image_list = [image_stack[i, :, :, :] for i in range(image_stack.shape[0])]\n tiled_images = np.concatenate(image_list, axis=1)\n return tiled_images", "def split(image):\n shred_width = 32\n image_width, image_height = image.size\n shreds = {}\n for x in range(0, image_width, shred_width):\n img = image.crop((x, 0, shred_width + x, image_height))\n s = Shred(img)\n shreds[x] = s\n return shreds", "def cut_image(im):\n width, height = im.size\n # Three pictures in a row\n item_width = int(width / 3)\n box_list = []\n for i in range(0, 3):\n for j in range(0, 3):\n box = (j * item_width, i * item_width, (j + 1) * item_width, (i + 1) * item_width)\n box_list.append(box)\n image_list = [im.crop(box) for box in box_list]\n return image_list", "def split_heads(x, num_heads):\n sz = x.size()\n # x -> [batch_size, length, heads, depth / num_heads]\n x = x.view(sz[0], sz[1], num_heads, sz[2] // num_heads)\n # [batch_size, length, 1, depth // num_heads] * \n heads = torch.chunk(x, num_heads, 2)\n x = []\n for i in range(num_heads):\n x.append(torch.squeeze(heads[i], 2))\n return x", "def split_image_with_bboxes_efficient(bboxes, image, bbox_size=50, tiles=4):\n\n if tiles == 0:\n return {(0, 0): {\"image\": image, \"bboxes\": bboxes}}\n\n split = tiles / 2\n\n height, width, *_ = image.shape\n\n new_height = height / split\n new_width = width / split\n\n tiles = {}\n\n tile_height = new_height\n\n for row in range(int(split)):\n tile_width = new_width\n for col in range(int(split)):\n\n # Create image with true values on tile\n canvas = np.zeros_like(image)\n\n ymin = int(tile_height-new_height)\n ymax = int(tile_height)\n xmin = int(tile_width-new_width)\n xmax = int(tile_width)\n\n canvas[ymin:ymax, xmin:xmax] = 1\n\n query_bboxes = find_query_boxes(bboxes, xmin, xmax, ymin, ymax, bbox_size)\n\n new_bboxes = find_overlaps(canvas, query_bboxes, col, row, new_width, new_height)\n\n cropped_image = image[ymin:ymax, xmin:xmax]\n\n tiles[(row, col)] = {\"image\": cropped_image, \"bboxes\": new_bboxes}\n\n tile_width = tile_width + new_width\n tile_height = tile_height + new_height\n\n return tiles", "def split_image(img):\n xs = [] # positions\n ys = [] # colors\n for row_i in range(img.shape[0]):\n for col_i in range(img.shape[1]):\n 
xs.append([row_i, col_i])\n ys.append(img[row_i, col_i])\n \n xs = np.array(xs)\n ys = np.array(ys)\n return xs, ys", "def split_image(img):\n xs = [] # positions\n ys = [] # colors\n for row_i in range(img.shape[0]):\n for col_i in range(img.shape[1]):\n xs.append([row_i, col_i])\n ys.append(img[row_i, col_i])\n \n xs = np.array(xs)\n ys = np.array(ys)\n return xs, ys", "def split_image(image_path: str, image_paths_iter: List[str]) -> List[str]:\n try:\n im = Image.open(image_path)\n except PIL.UnidentifiedImageError:\n return []\n\n width, height = im.size\n\n result = []\n\n splits = [[0, 0], [width // 2, 0], [0, height // 2], [width // 2, height // 2]]\n for (x, y), file_path in zip(splits, image_paths_iter):\n left = x\n top = y\n right = x + width // 2\n bottom = y + height // 2\n\n im.crop((left, top, right, bottom)).save(file_path)\n result.append(os.path.basename(file_path))\n os.remove(image_path)\n return result", "def split_chunk(list, chunk_size):\n for i in range(0, len(list), chunk_size):\n yield list[i:i + chunk_size]", "def split_into_players(self, team, num_players=5):\n height = team.shape[0] // num_players\n players = []\n\n for h in range(num_players):\n player = team[h * height : (h + 1) * height, :, :].copy()\n players.append(self.convert_to_pil_image(player))\n\n return players", "def get_shape_for_tile_split(\n arr_height: int, arr_width: int, nchannels: int, tile_height: int, tile_width: int\n) -> list[int]:\n shape = [\n arr_height // tile_height,\n tile_height,\n arr_width // tile_width,\n tile_width,\n ]\n if nchannels > 1:\n shape.append(nchannels)\n return shape", "def split_chunks(item_list, num_items_in_list):\n for item in range(0, len(item_list), num_items_in_list):\n # Create an index range for item_list of num_items_in_list items:\n yield item_list[item:item + num_items_in_list]", "def grouped(pixels):\n _n = 3 #groups it by 3\n _grouped = [pixels[_i :_i + _n] for _i in range(0,len(pixels), _n)]\n return _grouped", "def _tile_images(imgs, tile_shape, concatenated_image, margin_color=None):\n x_num, y_num = tile_shape\n one_width = imgs[0].shape[1]\n one_height = imgs[0].shape[0]\n if concatenated_image is None:\n concatenated_image = np.zeros((one_height * y_num, one_width * x_num, 3),\n dtype=np.uint8)\n if margin_color is not None:\n concatenated_image[:, :] = margin_color\n for y in range(y_num):\n for x in range(x_num):\n i = x + y * x_num\n if i >= len(imgs):\n pass\n else:\n concatenated_image[y*one_height:(y+1)*one_height,x*one_width:(x+1)*one_width,] = imgs[i]\n return concatenated_image", "def load_image_parts(self, filename, margin, spacing, tile_width, tile_height, colorkey=None): #-> [images]\n raise NotImplementedError(u'This should be implemented in a inherited class')", "def load_image_parts(self, filename, margin, spacing, tile_width, tile_height, colorkey=None): #-> [images]\n raise NotImplementedError(u'This should be implemented in a inherited class')", "def _chunks(l, ncols):\n assert isinstance(ncols, int), \"ncols must be an integer\"\n for i in range(0, len(l), ncols):\n yield l[i: i+ncols]", "def _split_on_chunks(self, iterable, size):\n return utils.split_on_chunks(iterable, size)", "def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling", "def tile(X, rows, 
cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling", "def divide_chunks(a_list, n):\n return [a_list[i:i + n] for i in range(0, len(a_list), n)]", "def grouper(n, iterable):\n it = iter(iterable)\n while True:\n chunk = tuple(itertools.islice(it, n))\n if not chunk:\n return\n yield chunk", "def split_every(n, iterable):\r\n iterator = iter(iterable)\r\n return takewhile(bool, (list(islice(iterator, n)) for _ in repeat(None)))", "def Split(cls, img):\n pix = img.load()\n\n # vertical cut\n vertical = []\n foundLetter = False\n for x in range(img.size[0]):\n inLetter = False\n for y in range(img.size[1]):\n if pix[x, y] == 0:\n inLetter = True\n break\n if not foundLetter and inLetter:\n foundLetter = True\n start = x\n if foundLetter and not inLetter:\n foundLetter = False\n end = x\n vertical.append((start, end))\n\n # horizontal cut\n def _findFistLine(pix, y_range, x_start, x_end):\n for y in y_range:\n for x in range(x_start, x_end):\n if pix[x, y] == 0:\n return y\n\n horizontal = []\n for i in vertical:\n start = _findFistLine(pix, range(img.size[1]), *i)\n end = _findFistLine(pix, reversed(range(img.size[1])), *i)\n horizontal.append((start, end))\n\n return [(vertical[i][0], horizontal[i][0], vertical[i][1], horizontal[i][1] + 1) for i in range(len(vertical))]", "def splitH_unlifted(pixmap):\n h = pixmap.shape[0]\n if h % 2 == 1:\n h = h // 2\n return [pixmap[:h,:], pixmap[h+1:,:]]\n else:\n h = h // 2\n return [pixmap[:h,:], pixmap[h:,:]]", "def split(array, nrows, ncols):\r\n r, h = array.shape\r\n return (array.reshape(h//nrows, nrows, -1, ncols)\r\n .swapaxes(1, 2)\r\n .reshape(-1, nrows, ncols))", "def grouper(iterable, n):\n it = iter(iterable)\n while True:\n chunk = tuple(islice(it, n))\n if not chunk:\n return\n yield chunk", "def split_list(self, num_bit = 5):\n jpg_list = listdir(join(self.dataset_dir, 'data/jpg_images'))\n label_list = listdir(join(self.dataset_dir, 'data/label_mat'))\n split_raw = scipy.io.loadmat(join(self.dataset_dir, 'scripts/split.mat'))\n train_raw = split_raw['trainIds']\n test_raw = split_raw['testIds']\n val_raw = split_raw['valIds']\n pattern_index = \"%0\" + str(num_bit) + \"d\"\n training_list = []\n for i in range(train_raw.shape[0]):\n train_id = pattern_index % train_raw[i, 0]\n if (((train_id + '.jpg') in jpg_list) and ((train_id + '.mat') in label_list)):\n training_list.append(train_id)\n test_list = []\n for i in range(test_raw.shape[0]):\n test_id = pattern_index % test_raw[i, 0]\n if (((test_id + '.jpg') in jpg_list) and ((test_id + '.mat') in label_list)):\n test_list.append(test_id)\n val_list = []\n for i in range(val_raw.shape[0]):\n val_id = pattern_index % val_raw[i, 0]\n if (((val_id + '.jpg') in jpg_list) and ((val_id + '.mat') in label_list)):\n val_list.append(val_id)\n return [training_list, test_list, val_list]", "def split_datasets(img_lst):\n num = len(img_lst)\n\n idx = np.random.permutation(num)\n train_lst = np.array(img_lst)[idx[:int(num * .8)]] # 80/20 split\n validation_lst = np.array(img_lst)[idx[int(num * .8):int(num * .9)]]\n test_lst = np.array(img_lst)[idx[int(num * .9):]]\n return train_lst, validation_lst, test_lst", "def make_chunks(l, n):\n return [l[i:i+n] for i in range(0, len(l), n)]", "def img_to_slices(img):\n res = []\n\n for i, slice_img in 
enumerate(img):\n res.append(slice_img)\n return res", "def _chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def greedy_split(arr, n, axis=0):\n length = arr.shape[axis]\n # compute the size of each of the first n-1 blocks\n block_size = int(np.ceil(length / float(n)))\n # the indices at which the splits will occur\n ix = np.arange(block_size, length, block_size)\n return np.array(np.split(arr, ix, axis))", "def tile_image(\n im: Image.Image, width: int, height: int, mode: Optional[str] = \"RGB\", **kwargs: Any\n) -> Image.Image:\n im_out = Image.new(mode, (width, height), **kwargs)\n\n h_tiles = ceil(width / im.width)\n v_tiles = ceil(height / im.height)\n\n for i in range(v_tiles):\n y = im.height * i\n for j in range(h_tiles):\n x = im.width * j\n im_out.paste(im, box=(x, y))\n\n return im_out", "def ghetto_split(list_, chunk_size=100):\n logging.debug(f\"Splitting list of {len(list_)} length, chunk size = {chunk_size}\")\n split_lists = []\n for i in range(0,len(list_),chunk_size):\n split_lists.append(list_[i:i+chunk_size])\n logging.debug(f\"List has been split into {len(split_lists)} lists. Total num of elements in split lists is {sum([len(i) for i in split_lists])}\")\n return split_lists", "def _chunk_windows(windows, num_chunks):\n if num_chunks <= 0 or int(num_chunks) != num_chunks:\n raise ValueError(\"Number of chunks must be an integer > 0\")\n num_chunks = min(len(windows) - 1, num_chunks)\n splits = np.array_split(windows[:-1], num_chunks)\n chunks = []\n for j in range(num_chunks - 1):\n chunk = np.append(splits[j], splits[j + 1][0])\n chunks.append(chunk)\n chunk = np.append(splits[-1], windows[-1])\n chunks.append(chunk)\n return chunks", "def img_to_tiles(cls, tiff_path, region, res, tile, tile_date_path, img_format, mp):\n\n # Get metadata from original image\n metadata = TiffMetadata(tiff_path)\n\n WIDTH, HEIGHT = region.calculate_width_height(res)\n ultra_large = False\n if WIDTH * HEIGHT > 2 * Image.MAX_IMAGE_PIXELS:\n ultra_large = True\n\n # Use the following dictionary to get the coordinates of each tile\n geoTran_d = TileUtils.getGeoTransform(tiff_path)\n\n # Check for valid tiling dimensions\n if (tile.width > WIDTH or tile.height > HEIGHT):\n raise argparse.ArgumentTypeError(\"Tiling dimensions greater than image dimensions\")\n\n # Determine the number of tiles per row and column\n if tile.handling == Handling.discard_incomplete_tiles:\n num_rows = (HEIGHT - tile.height * tile.overlap) // (tile.height * (1 - tile.overlap))\n num_cols = (WIDTH - tile.width * tile.overlap) // (tile.width * (1 - tile.overlap))\n else:\n num_rows = math.ceil((HEIGHT - tile.height * tile.overlap) / (tile.height * (1 - tile.overlap)))\n num_cols = math.ceil((WIDTH - tile.width * tile.overlap) / (tile.width * (1 - tile.overlap)))\n\n num_iterations = num_rows * num_cols\n \n # Find the pixel coordinate extents of each tile to be generated\n print(\"Gathering tiling information...\", end=\"\", flush=True)\n if mp:\n with Pool(processes=NUM_CORES) as pool:\n args = zip(repeat((metadata, tile, WIDTH, HEIGHT, geoTran_d, tile_date_path, num_rows, num_cols)), list(range(num_iterations)))\n pixel_coords = pool.map(getTilingSplitCoordsMP, args)\n else:\n pixel_coords = []\n for index in range(num_iterations):\n pixel_coords.append(getTilingSplitCoordsTuple(metadata,tile, WIDTH, HEIGHT, geoTran_d, tile_date_path, num_rows, num_cols, index))\n print(\"done!\")\n\n if mp:\n print(\"Generating {} tiles using {} processes...\".format(len(pixel_coords), NUM_CORES), 
flush=True)\n else:\n print(\"Generating {} tiles sequentially...\".format(len(pixel_coords)), flush=True)\n\n if ultra_large: \n # Create the intermediate tiles\n inter_dir, img_width, img_height = TileUtils.img_to_intermediate_images(tiff_path, tile, WIDTH, HEIGHT, metadata.date, img_format)\n\n # Add each coordinate to its proper list\n intermediate_files = [f for f in os.listdir(inter_dir) if f.endswith(img_format)]\n\n # Get the tiling information for all intermediate tiles\n intermediate_info = TileUtils.getIntermediateTilingInfo(tile, pixel_coords, WIDTH, HEIGHT, img_width, img_height, intermediate_files)\n\n # Tile the complete images\n print(\"\\tTiling from complete images\")\n for single_inter_imgs in tqdm(intermediate_info[0]):\n filename = single_inter_imgs[0][0]\n inter_metadata = IntermediateMetadata(filename)\n\n img_path = os.path.join(inter_dir, filename)\n src = Image.open(img_path)\n img_arr = np.array(src)\n\n if mp:\n # Create a shared array\n X_shape = img_arr.shape\n X = RawArray('B', X_shape[0] * X_shape[1] * X_shape[2])\n\n # Wrap shared array as numpy array\n X_np = np.frombuffer(X, dtype='uint8').reshape(X_shape)\n\n # Copy image to the shared array\n np.copyto(X_np, img_arr)\n \n # Use multiprocessing to tile the numpy array\n with Pool(processes=NUM_CORES, initializer=init_worker, initargs=(X, X_shape, None, None)) as pool:\n multi = [pool.apply_async(TileUtils.generate_tile, args=(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format,), kwds={\"inter_x\":(x - inter_metadata.start_x), \"inter_y\":(y - inter_metadata.start_y)}) for (filename, x, y, done_x, done_y, path) in single_inter_imgs]\n f = [p.get() for p in multi]\n pool.close()\n pool.join()\n else: \n for filename, x, y, done_x, done_y, path in single_inter_imgs:\n TileUtils.generate_tile(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format, inter_x=(x - inter_metadata.start_x), inter_y=(y - inter_metadata.start_y), img_arr=img_arr)\n\n # Close the image\n src.close()\n # Tile in between two images\n print(\"\\tTiling between two images\")\n if mp:\n with Pool(processes=NUM_CORES) as pool:\n args = zip(repeat((tile.width, tile.height, inter_dir, img_format)), intermediate_info[1])\n result = list(tqdm(pool.imap(processDoublesMP, args), total=len(intermediate_info[1])))\n else:\n for double_inter_imgs in tqdm(intermediate_info[1]):\n processDoublesTuple(tile.width, tile.height, inter_dir, img_format, double_inter_imgs)\n \n # Tile in between four images\n print(\"\\tTiling between four images\")\n if mp:\n # Use half as many processes as cores to ensure not running out of available mem and getting stuck\n with Pool(processes=(NUM_CORES // 2)) as pool:\n args = zip(repeat((tile.width, tile.height, inter_dir, img_format)), intermediate_info[2])\n result = list(tqdm(pool.imap(processQuadsMP, args), total=len(intermediate_info[2])))\n else:\n for quad_inter_imgs in tqdm(intermediate_info[2]):\n processQuadsTuple(tile.width, tile.height, inter_dir, img_format, quad_inter_imgs)\n shutil.rmtree(inter_dir)\n else: \n # Open image as a numpy array in order to tile from the array\n src = Image.open(tiff_path)\n img_arr = np.array(src)\n\n if mp:\n # Create a shared array\n X_shape = img_arr.shape\n X = RawArray('B', X_shape[0] * X_shape[1] * X_shape[2])\n\n # Wrap shared array as numpy array\n X_np = np.frombuffer(X, dtype='uint8').reshape(X_shape)\n\n # Copy image to the shared array\n np.copyto(X_np, img_arr)\n\n # Use multiprocessing to tile the numpy array\n with Pool(processes=NUM_CORES, 
initializer=init_worker, initargs=(X, X_shape, None, None)) as pool:\n multi = [pool.apply_async(TileUtils.generate_tile, args=(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format)) for (x, y, done_x, done_y, path) in pixel_coords]\n f = [p.get() for p in tqdm(multi)]\n pool.close()\n pool.join()\n else:\n for x, y, done_x, done_y, path in tqdm(pixel_coords):\n TileUtils.generate_tile(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format, img_arr=img_arr)\n \n # Close the image\n src.close()\n print(\"done!\")", "def choose_optimal_image_split(im, method='im_size', min_tile_size=360000):\n \n n_cores = os.cpu_count()\n # number of segmented objects, drop the background value\n n_obj = np.unique(im).size - 1\n \n if method == 'im_size':\n # avoid too many splits if image is not so big\n im_size = im.nbytes # slightly different from sys.getsizeof(im)\n # max power of 2\n max_i = int(np.log2(n_cores)) + 1\n n_splits = 1\n for i in range(1, max_i):\n new_split = 2**i\n if im_size / new_split >= min_tile_size:\n n_splits = new_split\n else:\n break\n elif method == 'naive':\n n_splits = n_cores\n \n return n_splits", "def split_image(image, label):\n # [left, right, top, botton]\n left_margin = label[0]\n right_margin = image_size[0] - (label[0] + breach_size[0])\n top_margin = label[1]\n bottom_margin = image_size[1] - (label[1] + breach_size[1])\n margin = [left_margin, right_margin, top_margin, bottom_margin]\n # Calculate probabilities about the margin for all directions\n probabilities = normalize(margin)\n # Pick directions and boundary\n direction, size = random_pick(margin, probabilities)\n boundary = random.randint(0, size)\n\n image_np = image2matrix(image)\n image_new_np, deviation = random_split_matrix(image_np, boundary, direction)\n image_new = matrix2image(image_new_np)\n label_new = [label[0] + deviation[0], label[1] + deviation[1]]\n\n return image_new, label_new", "def grouper(n, iterable):\n\tit = iter(iterable)\n\twhile True:\n\t\tchunk = tuple(itertools.islice(it, n))\n\t\tif not chunk:\n\t\t\treturn\n\t\tyield chunk", "def __chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def make_sections(self, split_num=1000):\n self.obstacles.add(self.chairlift.pylons.sprites())\n num = max(1, int(len(self.obstacles) / split_num))\n section_length = int(self.map_size[1] / num)\n self.sections = {}\n for y in range(0, self.map_size[1], section_length):\n rect_info = (0, y, self.map_size[0], section_length)\n rect = pg.Rect(rect_info)\n self.sections[rect_info] = pg.sprite.Group([x for x in self.obstacles if rect.collidepoint(x.rect.midbottom)])", "def chunks(parts, n):\n for i in range(0, len(parts), n):\n yield parts[i:i+n]", "def subimage(path_image, height=None, width=None, stepsize=1):\n img = cv2.imread(path_image)\n h, w = img.shape[:2]\n\n if height is None or width is None:\n height = int(h / 2)\n width = int(w / 2)\n stepsize = width\n\n for x in range(0, h, stepsize):\n px = x\n end_x = x + height\n if end_x > h:\n end_x = h\n px = max(end_x - height, 0)\n\n for y in range(0, w, stepsize):\n py = y\n end_y = y + width\n if end_y > w:\n end_y = w\n py = max(end_y - width, 0)\n\n yield img[px:end_x, py:end_y]", "def chunks(_class, l, n):\n\t\t# CITE: http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks-in-python\n\t for i in xrange(0, len(l), n):\n\t yield l[i:i+n]", "def get_tiles(self, numTiles, gameBag):\r\n tiles_picked = gameBag.pick_tiles(numTiles)\r\n for givenTile in tiles_picked:\r\n 
self.rack.append(givenTile)", "def iter_chunks(self, chunk_size, depths=True, step_size=None):\n step_size = step_size or chunk_size\n\n i = 0\n while i < self.height:\n if depths:\n yield self.img[i:i+chunk_size], self.depths[i:i+chunk_size]\n else:\n yield self.img[i:i+chunk_size]\n i += step_size", "def split_to_batches(iterable, n=1):\n l = len(iterable)\n for ndx in range(0, l, n):\n yield iterable[ndx:min(ndx + n, l)]", "def readTiles(self):\n TileImage = Image.open(self.Filename).convert(\"RGB\")\n TileIW, TileIH = TileImage.size\n TilesetW, TilesetH = TileIW // self.TileWidth, TileIH // self.TileHeight\n\n for y in range(TilesetH):\n for x in range(TilesetW):\n box = self.TileWidth * x, self.TileHeight * y, self.TileWidth * (x+1), self.TileHeight * (y+1)\n tile = TileImage.crop(box)\n self.List.append(tile)\n\n str = tile.tostring()\n if not str in self.TileDict:\n #print(\"add tile: \", str)\n self.TileDict[str] = len(self.List) - 1\n print(\"tile count: {}, unique count: {}\".format(len(self.List),len(self.TileDict.values())))", "def chunker(results, n):\n\n def grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return zip_longest(*args, fillvalue=fillvalue)\n\n m = int(len(results) / n)\n return list(grouper(iterable=results, n=m, fillvalue=None))", "def chunk(iter_list, size):\n iter_list = iter(iter_list)\n # lambda: creates a returning expression function\n # which returns slices\n # iter, with the second argument () stops creating\n # iterators when it reaches the end\n return iter(lambda: tuple(islice(iter_list, size)), ())", "def chunk(self, shape, split) -> NotImplementedError:\n raise NotImplementedError()", "def long_slice(image_path, out_name, outdir, slice_size, net):\n img = Image.open(image_path)\n imgout = Image.open(image_path)\n orw, orh = img.size\n width, height = img.size\n slicesh = int(math.ceil(height/slice_size))\n slicesw = int(math.ceil(width/slice_size))\n img = img.resize((slicesw*slice_size, slicesh*slice_size), PIL.Image.ANTIALIAS)\n imgout = imgout.resize((slicesw*slice_size, slicesh*slice_size), PIL.Image.ANTIALIAS)\n orw, orh = imgout.size\n width, height = img.size\n print(img.size)\n r = 1\n draw = ImageDraw.Draw(imgout)\n\n flag_continue = True\n while flag_continue:\n if os.path.exists(\"./testsliceimage/list.txt\"):\n os.remove(\"./testsliceimage/list.txt\")\n file = open(\"./testsliceimage/list.txt\", \"w+\")\n for sliceh in range(slicesh*step):\n for slicew in range(slicesw*step):\n #set the bounding box! 
The important bit\n bbox = (int(slicew*slice_size/step), int(sliceh*slice_size/step), int(slicew*slice_size/step)+slice_size, int(sliceh*slice_size/step)+slice_size)\n working_slice = img.crop(bbox)\n\n working_slice.save(os.path.join(outdir, \"slice_\" + str(height) + \"_\" + str(width) + \"_\" + out_name + \"_\" + str(sliceh) + \"_\" + str(slicew) +\".png\"))\n file.write(\"slice_\" + str(height) + \"_\" + str(width) + \"_\" + out_name + \"_\" + str(sliceh) + \"_\" + str(slicew) +\".png\\n\")\n\n if sliceh == 16 and slicew == 27 and width == 450 :\n print (int(slicew*slice_size/step), int(sliceh*slice_size/step),int(slicew*slice_size/step)+slice_size,int(sliceh*slice_size/step)+slice_size)\n\n file.close()\n transform_test = tf.Compose([tf.Grayscale(), tf.ToTensor(), tf.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n testset = UnknownDataset(\"./testsliceimage/\", \"./testsliceimage/list.txt\", transform=transform_test)\n testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE,\n shuffle=False, num_workers=WORKERS)\n\n with torch.no_grad():\n N = 0\n for data in testloader:\n images, img_names = data['image'], data['image_name']\n outputs = net(images.float())\n _, predicted = torch.max(outputs.data, 1)\n # print(predicted)\n if max(predicted) == 1 :\n ite = -1\n for predic in predicted :\n ite += 1\n if predic == 1 and outputs[ite][1]-outputs[ite][0] > CONFIDENCE:\n print(img_names[ite])\n # print(outputs)\n N += 1\n #dessiner carre sur image\n slh = int(img_names[ite].split('_')[4])\n slw = int(img_names[ite].split('_')[5][:-4])\n x1 = int(slh * slice_size / step)\n x2 = x1 + slice_size\n y1 = int(slw * slice_size / step)\n y2 = y1 + slice_size\n\n if slh == 16 and slw == 27 and width ==450 :\n print (x1, y1, x2, y2)\n\n print(r)\n rh = orh / height\n rw = orw / width\n x1 = x1 * rh\n x2 = x2 * rh\n y1 = y1 * rw\n y2 = y2 * rw\n\n draw.rectangle(((y1, x1), (y2, x2)), outline=\"red\")\n # draw.text((y2,x2), img_names[0])\n copyfile(\"./testsliceimage/\"+img_names[ite], \"./goodimage/\"+ img_names[ite])\n\n if width <= 200 or height <= 200:\n flag_continue = False\n else:\n r = r * scale\n width, height = int(width/scale), int(height/scale)\n slicesh = int(math.ceil(height/slice_size))\n slicesw = int(math.ceil(width/slice_size))\n img = img.resize((slicesw*slice_size, slicesh*slice_size), PIL.Image.ANTIALIAS)\n width, height = img.size\n\n # imgout = imgout.resize((slicesw*slice_size, slicesh*slice_size), PIL.Image.ANTIALIAS)\n imgout.save(\"./rectangle/out\", \"PNG\")", "def _split_heads(x, num_heads):\n\tshape_lst = bert_utils.get_shape_list(x)\n\tdepth = shape_lst[-1]\n\tbatch = shape_lst[0]\n\tseq = shape_lst[1]\n\t# print(x.get_shape(), \"===splitheads===\")\n\tsplitted_x = tf.reshape(x, [tf.shape(x)[0], tf.shape(x)[1], \\\n\t\tnum_heads, depth // num_heads])\n\treturn tf.transpose(splitted_x, [0, 2, 1, 3])", "def _chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i + n]", "def split(a, n):\n k, m = divmod(len(a), n)\n ret = [a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n)]\n return ret", "def split_to_chunks(of_list, chunk_size):\n assert of_list is not None\n\n for i in range(0, len(of_list), chunk_size):\n yield of_list[i:i + chunk_size]", "def chunks(l, n):\n for i in range(0, n):\n yield l[i::n]", "def _chunks(l, n):\n\tfor i in range(0, len(l), n):\n\t\tyield l[i:i + n]", "def split_large_groups(ctx):\n asyncio.run(split_large_groups_impl(ctx.obj[\"config\"]))", "def main():\n\tparser = construct_parser()\n\targs = parser.parse_args()\n\ttiles 
= slice(args.image, args.num_tiles, save=False)\n\tsave_tiles(tiles, prefix=get_basename(args.image), directory=args.dir,\n\t\t format=args.format)", "def partition_files(list_of_files, number_of_parts):\n return np.array_split(list_of_files, number_of_parts)", "def _get_chunks(l, n = 10):\n \n for i in range(0, len(l), n): yield l[i: i + n]", "def chunk(iterable, n):\n iterable = [e for e in iterable]\n avg_length = int(math.ceil(len(iterable) / n))\n return [iterable[i * avg_length:(i + 1) * avg_length] for i in range(n)]", "def grid_image(output):\n grid = []\n for data in output:\n grid += [make_grid(data, nrow=5, normalize=True)]\n return grid", "def _split_heads(self, x: torch.Tensor) -> torch.Tensor:\n depth = x.size(-1)\n split_x = torch.reshape(x, (\n x.size(0), x.size(1),\n self._hparams.num_heads, depth // self._hparams.num_heads))\n return split_x.permute((0, 2, 1, 3))", "def _convert_chunk_to_tiles(\n feature_data: np.array, loss_window_radius: int, window_radius: int\n) -> Tuple[np.array, np.array]:\n\n output_array = []\n col_index = []\n for _col in range(0, feature_data.shape[1], loss_window_radius * 2):\n col_index.append(min(_col, feature_data.shape[1] - window_radius * 2))\n output_array.append(feature_data[:, col_index[-1] : col_index[-1] + window_radius * 2, :])\n output_array = np.stack(output_array)\n output_array = np.reshape(\n output_array, (output_array.shape[0], output_array.shape[1], output_array.shape[2], feature_data.shape[-1])\n )\n\n col_index = np.array(col_index)\n\n return output_array, col_index", "def slice(n, m):\n chunks = []\n for piece in islice(n, m):\n chunks.append(piece)\n return chunks", "def make_floor(self):\n\n for y in range(0, self.num_tiles[1] + 1):\n for x in range(0, self.num_tiles[0] + 1):\n offset = (x * self.tile.size[0], y * self.tile.size[1])\n self.image.blit(self.tile.image, offset)", "def create_grid(images, n_rows=4, n_cols=4):\n k = min(n_rows * n_cols, len(images))\n indices = [i for i in range(k)]\n return _create_grid(images, indices, n_rows, n_cols)", "def chunks(self, n):\n return _([self._[i:i+n] for i in range(0, self.size()._, n)])", "def chunks(tlist, n):\n n = max(1, n)\n return (tlist[i:i+n] for i in range(0, len(tlist), n))", "def test_split_beyond_resolution_raises_ValueError():\n\n h = 11\n img_dim = (50, 50)\n amg = mg.MultiGrid(img_dim, h, WS=127)\n with pytest.raises(ValueError):\n amg.cells[0].split()" ]
[ "0.7739236", "0.76190794", "0.70275676", "0.67776227", "0.6704536", "0.66682243", "0.6638104", "0.6595239", "0.65494275", "0.6480226", "0.646695", "0.6428937", "0.6403003", "0.6392269", "0.6336873", "0.62955356", "0.6278064", "0.6265527", "0.6264345", "0.624642", "0.614579", "0.6131461", "0.6109429", "0.602018", "0.60108656", "0.60010946", "0.5977349", "0.59351265", "0.5910639", "0.5877873", "0.5863218", "0.5855848", "0.58289546", "0.58289546", "0.5804196", "0.57845247", "0.5772935", "0.57720536", "0.57567143", "0.57557786", "0.57549274", "0.5716676", "0.5716676", "0.5715037", "0.57068425", "0.5704924", "0.5704924", "0.5695728", "0.569101", "0.56829554", "0.5679308", "0.56731015", "0.5671546", "0.56683266", "0.56643647", "0.5660737", "0.5654717", "0.5643858", "0.56159186", "0.56097406", "0.5609725", "0.5600244", "0.5598472", "0.5591196", "0.5587513", "0.5586454", "0.55850136", "0.55701375", "0.5569712", "0.55624396", "0.5555536", "0.5548986", "0.55464244", "0.5543758", "0.5537033", "0.5536551", "0.5536442", "0.55275494", "0.5526671", "0.55256677", "0.5524169", "0.5516904", "0.5510734", "0.5500786", "0.5493659", "0.54935014", "0.54922885", "0.5487227", "0.54837084", "0.5478029", "0.54761744", "0.5475964", "0.5462008", "0.54578286", "0.545738", "0.54567426", "0.54537314", "0.544857", "0.5446994", "0.5446816" ]
0.64391273
11
Write image files to disk. Create specified folder(s) if they do not exist.
def save_tiles(tiles, prefix="", directory=os.getcwd(), format="png"):
    for tile in tiles:
        tile.save(
            filename=tile.generate_filename(
                prefix=prefix, directory=directory, format=format
            ),
            format=format,
        )
    return tuple(tiles)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(img, path):\n create_directories_for_file_name(path)\n writer = sitk.ImageFileWriter()\n writer.Execute(img, path, True)", "def create_directories(train_path, test_path):\n train_path.joinpath(\"images\").mkdir(parents=True)\n test_path.joinpath(\"images\").mkdir(parents=True)", "def createImageFolder():\n try:\n os.makedirs(imageFolder)\n except FileExistsError:\n # Exists, delete contents instead\n clearImageFolder()", "def setup_image_folder(path_to_images):\n\n print(\"setup images folder...\")\n\n if os.path.isdir(path_to_images):\n print(\"folder already exists: remove...\")\n shutil.rmtree(path_to_images)\n\n os.mkdir(path_to_images)\n print(\"folder created\")", "def make_path(self):\n folders = [\n f\"{self.save_path}{self.name}/json/\",\n f\"{self.save_path}{self.name}/images/\",\n ]\n if hasattr(self, \"masks\"):\n folders.append(f\"{self.save_path}{self.name}/masks/\")\n for folder in folders:\n if not os.path.exists(folder):\n os.makedirs(folder)", "def create_folder(self):\n self.config.csv_path.mkdir(parents=True, exist_ok=True)\n self.config.images_path.mkdir(parents=True, exist_ok=True)", "def __save_to_dir(self, imagelist, prefix, PATH):\n for pair in imagelist:\n directory = os.path.join(PATH, pair[1])\n if not os.path.exists(directory):\n os.mkdir(directory)\n filename = prefix + pair[2]\n pair[0].save(os.path.join(directory, filename))\n print(\"Saved \" + os.path.join(directory, filename))", "def create_directory():\n try:\n if os.path.isdir(\"./imagesFromTweets\") != True:\n os.makedirs(\"./imagesFromTweets\")\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise", "def create_noobj_folder(\n folder: PathLike, \n img_ext: str = \".jpg\",\n):\n folder = Path(folder).expanduser().resolve()\n images = glob(folder, img_ext)\n \n for image in images:\n filename = image.name\n _folder = image.parent.name\n path = folder / (image.stem + \".xml\")\n img_w, img_h = get_image_size(image)\n\n tree = ET.Element(\"annotation\")\n\n et_folder = ET.SubElement(tree, \"folder\")\n et_folder.text = _folder\n\n et_filename = ET.SubElement(tree, \"filename\")\n et_filename.text = filename\n\n et_path = ET.SubElement(tree, \"path\")\n et_path.text = str(path)\n\n et_img_size = ET.SubElement(tree, \"size\")\n ET.SubElement(et_img_size, \"width\").text = str(img_w)\n ET.SubElement(et_img_size, \"height\").text = str(img_h)\n ET.SubElement(et_img_size, \"depth\").text = \"3\"\n\n content = ET.tostring(tree, encoding=\"unicode\", pretty_print=True)\n try: \n path.write_text(content)\n except KeyboardInterrupt:\n path.write_text(content)\n exit()", "def create_intermediate_files():\n\n dirs = (DIR_PAGE, DIR_SRGB, DIR_VTI, DIR_TIFF,\n DIR_BACK, DIR_TEXT, DIR_FINAL)\n \n for dir in dirs:\n try:\n os.mkdir(os.path.join(cwd, dir))\n except OSError, e:\n print 'directory (', dir, ') already exists'", "def make_image_dir(to_path, filenames):\n image_dir = os.path.join(to_path, \"image_2\")\n os.makedirs(image_dir)\n for f in filenames:\n image_file = os.path.join(image_dir, f + \".png\")\n os.system(\"cp sample.png {}\".format(image_file))", "def make_directories(self):\n os.makedirs(self.data_dir, exist_ok=True)\n os.makedirs(self.patches_dir, exist_ok=True)\n os.makedirs(self.raw_image_dir, exist_ok=True)\n os.makedirs(self.pro_image_dir, exist_ok=True)\n os.makedirs(self.results_dir, exist_ok=True)", "def CreateDirs(self):\n# First, create a list of directories.\n dnames = []\n tags = ['', '_m', '_mf']\n for entry in self.info.keys():\n if 
self.info[entry]['type'] == 'epi':\n for tag in tags:\n fname = self.info[entry].get('imgfile%s' % tag, None)\n if fname is not None:\n dnames.append(os.path.dirname(fname))\n else:\n if self.info[entry].get('outdir',None) is not None:\n dnames.append(self.info[entry]['outdir'])\n\n# Create them if they don't already exist.\n for dname in dnames:\n if not os.path.exists(dname):\n self.MakeDir(dname)\n if self.verbose:\n print 'mkdir %s' % dname", "def make_processed_directories(zone, region, zoom_level = 19, image_size = 256):\n os.system(f'mkdir ../../data/processed/images-{image_size}-{region}-{zone}-{zoom_level}')\n os.system(f'mkdir ../../data/processed/masks-{image_size}-{region}-{zone}-{zoom_level}')\n img_path = f'../../data/processed/images-{image_size}-{region}-{zone}-{zoom_level}'\n mask_path = f'../../data/processed/masks-{image_size}-{region}-{zone}-{zoom_level}'\n return img_path, mask_path", "def preprocess_images(file_path, new_file_path):\n if not os.path.isdir(new_file_path):\n os.mkdir(new_file_path)\n i = 0\n for dir in listdir(file_path):\n j = 0\n for image_path in listdir(file_path + '/' + dir):\n image = open_image(image_path)\n cv2.imwrite(file_path + '/' + image_path + '/' str(i) + '/' +str(i) + '.jpg', image)\n j += 1\n i += 1", "def create_directory_structure():\n\n def ensure_directory(path):\n try:\n os.makedirs(path)\n\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n ensure_directory('./out/textures')\n ensure_directory('./out/data')", "def imwrite(img, file_path, params=None, auto_mkdir=True):\n if auto_mkdir:\n dir_name = os.path.abspath(os.path.dirname(file_path))\n os.makedirs(dir_name, exist_ok=True)\n ok = cv2.imwrite(file_path, img, params)\n if not ok:\n raise IOError('Failed in writing images.')", "def make_folders(self):\n\t\tfor name in self.folders:\n\t\t\tos.makedirs(self.path+\"/\"+name,exist_ok=True)", "def create_train(train_img_path):\n\n f = open(\"train.txt\", \"w+\")\n for subdirs, dirs, files in os.walk(train_img_path):\n for filename in files:\n if filename.endswith(\".jpg\"):\n train_image_path = os.path.join(train_img_path, filename)\n print(train_image_path)\n f.write(train_image_path + \"\\n\")\n f.close()", "def write(self, img_tensor, img_paths, domain='A'):\n assert len(img_paths) == img_tensor.shape[0], \"size of paths and num of images not match!\"\n path = os.path.join(self.root, domain)\n if not os.path.exists(path):\n mkdir(path)\n for i in range(len(img_paths)):\n img = tensor2npimg(img_tensor[i], scale=True)\n _, filename = os.path.split(img_paths[i])\n img_write(img, os.path.join(path, filename))", "def _create_dir(self):\n images_train_dir = os.path.join('images', self.name, 'train')\n images_test_dir = os.path.join('images', self.name, 'test')\n log_dir = os.path.join('log', self.name)\n model_dir = os.path.join('checkpoint', self.name)\n if not os.path.exists(images_train_dir):\n os.makedirs(images_train_dir)\n\n if not os.path.exists(images_test_dir):\n os.makedirs(images_test_dir)\n\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n\n return images_train_dir, images_test_dir, log_dir, model_dir", "def folders():\n\n os.makedirs('Images/')\n os.makedirs('Seg/')\n\n return", "def create_output_directory_for_resized_images():\n\n try:\n if not os.path.isdir(RESIZED_NEGATIVE_PATH):\n return os.makedirs(RESIZED_NEGATIVE_PATH)\n elif not os.path.isdir(RESIZED_POSITIVE_PATH):\n return os.makedirs(RESIZED_POSITIVE_PATH)\n except OSError as 
e:\n print('Error --> {}'.format(e))", "def createDirs(self):\n logging.info(\"Creating Directories\")\n\n if not self.img_exist:\n self.reCreateDir(self.savePathJoin(\"Images\"))\n if not self.of_exist:\n self.reCreateDir(self.savePathJoin(\"Of\"))\n if not self.back_of_exist:\n self.reCreateDir(self.savePathJoin(\"Back_Of\"))\n if not self.depth_exist:\n self.reCreateDir(self.savePathJoin(\"Depth\"))\n if not self.object_detection_dir_exist and (\n self.ui.c_object_detection.isChecked() or self.ui.c_crash_plot.isChecked()\n ):\n self.reCreateDir(self.savePathJoin(\"ObjectDetection\"))\n if self.super_pixel_method != \"\" and not os.path.exists(\n os.path.join(self.savePathJoin(\"Super_Pixel\"), self.super_pixel_method)\n ):\n os.makedirs(\n os.path.join(self.savePathJoin(\"Super_Pixel\"), self.super_pixel_method)\n )\n\n self.reCreateDir(RESULTS)\n self.reCreateDir(NP_DIR)\n self.reCreateDir(MASK_DIR)\n\n if self.ui.c_crash_plot.isChecked():\n self.reCreateDir(PLOT_CRASH_DIR)\n if self.ui.c_draw.isChecked():\n self.reCreateDir(DRAW_DIR)\n if self.ui.c_velocity.isChecked():\n self.reCreateDir(VL_DIR)\n if self.ui.c_speed_plot.isChecked():\n self.reCreateDir(PLOT_SPEED_DIR)\n if self.super_pixel_method != \"\":\n self.reCreateDir(SUPER_PIXEL_DIR)\n if self.user[\"GT\"] != \"\" and self.ui.c_error_plot.isChecked():\n self.reCreateDir(PLOT_ERROR_DIR)", "def write_upload_files(self, appstruct):\n \n # Create the directory if it does not exist\n final_dir = \"thumbnails/%s\" % slugify(appstruct[\"serial\"])\n if not os.path.exists(final_dir):\n log.info(\"Make directory: %s\", final_dir)\n os.makedirs(final_dir)\n\n final_file = \"%s/uploaded.pdf\" % final_dir\n file_pointer = appstruct[\"pdf_upload\"][\"fp\"]\n self.single_file_write(file_pointer, final_file)", "def _save_image(self, image_name, image, output_dir):\n dst = '{}/{}'.format(output_dir, self._image_filename(image_name))\n os.makedirs(output_dir, exist_ok=True)\n try:\n with open(dst, 'wb') as f:\n for chunk in image.save(named=self.image_registry_name(image_name)):\n f.write(chunk)\n log.info('Image {} saved as {}'.format(image_name, dst))\n except Exception as err:\n if os.path.isfile(dst):\n os.remove(dst)\n raise err", "def test_save_image(self):\n\n from m3_save_images.m3_save_images import save_images\n folder_destination_name = \"unittest-sorted-images\"\n path_source = \"../img\"\n image_name = [\"00ff00.png\", \"aqua.png\", \"black.jpg\", \"yellow.png\", \"red2.jpg\", \"green.jpg\"]\n image_color = [\"Lime\", \"Aqua\", \"Black\", \"Yellow\", \"Red\", \"Green\"]\n # new empty folder is needed for testing save_image() function\n if os.path.isdir(folder_destination_name):\n shutil.rmtree(folder_destination_name)\n os.mkdir(folder_destination_name)\n # creating folders\n for i in range(0, 4):\n save_images(folder_destination_name, path_source, image_name[i], image_color[i])\n self.assertEqual(''.join(os.listdir(os.path.join(folder_destination_name, image_color[i]))), image_name[i])\n save_images(folder_destination_name, path_source, image_name[i], image_color[5])\n self.assertNotEqual(''.join(os.listdir(os.path.join(folder_destination_name, image_color[i]))), image_name[5])", "def insert_suppl_folders(path_mode):\n global REL_PATH_IMAGES\n os.mkdir(path_mode + '/allegati')\n if len(images_to_add) > 0:\n path_ex_images = path_mode + 'img'\n os.mkdir(path_ex_images)\n for img in images_to_add:\n path_img_src = os.getcwd() + '/' + REL_PATH_IMAGES + img\n shutil.copy2(path_img_src, path_ex_images)\n 
os.chmod(path_ex_images+\"/\"+img, S_IREAD)", "def make_output_folders():\n call([\"mkdir\", \"-p\", args.out_folder.strip()])\n call([\"mkdir\", args.out_folder.strip() + \"/files\"])\n call([\"mkdir\", args.out_folder.strip() + \"/fasta\"])", "def _put_antenny_files_on_device(self):\n self._ensure_directory()\n self._recursive_put_files()", "def save_images_in_folder(folder, images, size = (224, 224), start_index = 0):\n\n # Loop over the images\n for i, image in enumerate(images):\n # Resize image to target size\n image = cv2.resize(image, dsize = size)\n \n # Convert image back to BGR color space\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n \n # Create the path where the image will be save, images will be indexed\n path = os.path.join(folder, str(start_index + i) + '.png')\n \n # Write / Save image \n cv2.imwrite(path, image)", "def save_output_image_to_directory(self):\n curr_directory = os.path.dirname(os.path.abspath(__file__))\n images_dir = curr_directory + \"/images/\"\n if not os.path.exists(images_dir):\n os.makedirs(images_dir)\n self.output_image_name = md5(str(uuid4()).encode()).hexdigest() + \".png\"\n image_file_name = images_dir + self.output_image_name\n self.output_image.save(image_file_name)\n logger.info(\"Image file saved locally : %s\", image_file_name)", "def imgWrite(img, path):\n dirMake(os.path.dirname(path))\n sitk.WriteImage(img, path)\n\n # Reformat files to be compatible with CIS Software\n #ext = os.path.splitext(path)[1].lower()\n #if ext == \".vtk\": vtkReformat(path, path)", "def appendpics(pathofimg, w_sub, h_sub, step):\n num = 0\n dirlist = []\n images = [] # images in each folder\n for root, dirs, fileswer in os.walk(pathofimg):\n if len(dirs)!= 0:\n for dir in dirs:\n dirlist.append(dir)\n for rooert, dirwerwes, files in os.walk(pathofimg+'/'+dir):\n for file in files:\n if(file.endswith('.png')):\n images.append(Image.open(pathofimg+'/'+dir+'/'+file))\n if(len(images)==81):\n break\n target = montage(images, w_sub, h_sub, step)\n target.save(pathofimg +'/'+ dir + '.png', quality=100)\n else:\n dir = 'Generated'\n for file in fileswer:\n if (file.endswith('.png')):\n images.append(Image.open(pathofimg +'/'+ file))\n target1 = montage(images, w_sub, h_sub, step)\n savepath = pathofimg +'/'+ 'generated'\n os.makedirs(savepath)\n target1.save(savepath +'/'+ dir + '.png', quality=100)", "def _directory_path(self):\n if not os.path.isdir(self.new_img_dir) : os.mkdir(self.new_img_dir)\n if not os.path.isdir(os.path.join(self.new_img_dir, \"train\")) : os.mkdir(os.path.join(self.new_img_dir, \"train\"))\n if not os.path.isdir(os.path.join(self.new_img_dir, \"test\")) : os.mkdir(os.path.join(self.new_img_dir, \"test\"))", "def save_images(images, filenames, output_dir):\n for i, filename in enumerate(filenames):\n dirname = os.path.dirname(filename)\n\n if dirname!='':\n dirpath = os.path.join(output_dir, dirname)\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n\n # Images for inception classifier are normalized to be in [-1, 1] interval,\n # so rescale them back to [0, 1].\n with tf.gfile.Open(os.path.join(output_dir, filename), 'w') as f:\n img = (((images[i, :, :, :] + 1.0) * 0.5) * 255.0).astype(np.uint8)\n Image.fromarray(img).save(f)", "def create_directories(dir_names: list, base_path: str):\n\tfor dir_name in dir_names:\n\t\timage_dir = join(base_path, str(dir_name) + 'x')\n\t\tif not isdir(image_dir):\n\t\t\tos.mkdir(image_dir)", "def create_folders():\n if not os.path.exists(\"data/train-npy/\"):\n 
os.makedirs(\"data/train-npy/\")\n if not os.path.exists(\"data/test-npy/\"):\n os.makedirs(\"data/test-npy/\")\n if not os.path.exists(\"data/valid-npy/\"):\n os.makedirs(\"data/valid-npy/\")", "def make_dirs(path):\n ds_path = os.path.join(os.path.join(path, DATASET_DIR))\n x_path = os.path.join(ds_path, IMG_DIR)\n y_path = os.path.join(ds_path, MSK_DIR)\n paths = [ds_path, x_path, y_path]\n\n for p in paths:\n try:\n os.mkdir(p)\n logger.info(\"Directory %s created\", ds_path)\n except Exception as e:\n logger.warning(\"Failed to create directories: %s\", e)\n\n return ds_path, x_path, y_path", "def create_folders():\n os.makedirs(GRID_DIR, exist_ok=True)", "def create_directories(path):\n directories = ['images', 'pdf', 'videos', 'audio', 'spreedsheet', 'text', 'scripts', 'docs', 'other']\n for directory in directories:\n create_directory(path, directory)", "def mkdir(path):", "def createFolder(self):\n self.destination = self.getPath() #Find the destination to create the folder\n try:\n os.makedirs(self.destination) #Try and make a folder\n except FileExistsError:\n pass #Otherwise continue if an error is encountered because the file exists already", "def save_image(dirname, filename, img):\r\n if os.path.exists(dirname) == 0:\r\n os.makedirs(dirname)\r\n cv2.imwrite(dirname+filename+\".bmp\", img)", "def create_folder(self):\n Path(self.root_name).mkdir(parents=True, exist_ok=True)\n Path(self.root_name + \"/VOC2021/\").mkdir(parents=True, exist_ok=True)\n Path(self.image_folder_path).mkdir(parents=True, exist_ok=True)\n Path(self.annot_path).mkdir(parents=True, exist_ok=True)\n Path(self.root_name + \"/VOC2021/ImageSets/\").mkdir(parents=True, exist_ok=True)\n Path(self.txt_path).mkdir(parents=True, exist_ok=True)", "def save_image(image, output_folder, output_name):\n\n\tfolder_path = compute_path(output_folder, 'dataset')\n\tos.makedirs(folder_path, exist_ok=True)\n\n\tfile_path = os.path.join(folder_path, output_name + '.png')\n\timage.save(file_path)", "def prepare_output_dir(out_dir, test_dir):\r\n\r\n if not out_dir.exists():\r\n out_dir.mkdir()\r\n\r\n # get the necessary file names\r\n file_names = get_file_names(test_dir, args.distance, print_file_names=False)\r\n\r\n # copy the images in the firstIms into the output folder\r\n for name in file_names[1][0]:\r\n file_path = Path(test_dir / name)\r\n copy_to = Path(out_dir / name)\r\n shutil.copy(file_path, copy_to)\r\n\r\n # the firstIms list does not contain the last image,\r\n # so we need to also copy the last image of the secIms into the output folder\r\n last_im = file_names[1][1][-1]\r\n shutil.copy(Path(test_dir/last_im), Path(out_dir/last_im))\r\n\r\n return file_names", "def create_paths(manager, parentpath=\"extractor_test_results/HoG/\"):\n \n paths_to_create = [\"data/features_all\", \"data/features_filled\",\n \"data/pair/both\", \"hog_images\", \"hog_plots\",\n \"orig_frames\", \"processed_frames\", \"evaluation\"]\n \n for path in paths_to_create:\n manager.make_folder(parentpath + path)", "def create_train_file(img_folder_path: str, train_file_path: str) -> None:\n files = []\n for ext in (\"*.gif\", \"*.png\", \"*.jpg\", \"*.bmp\"):\n img_path = glob(join(img_folder_path, ext))\n if img_path:\n files.extend(img_path)\n\n write_to_train_file(files, train_file_path)\n\n print(\"Training files are created in \" + img_folder_path)", "def create_output_folder(output_folder_name: str, finding_labels: list):\n if not os.path.isdir(output_folder_name):\n os.mkdir(output_folder_name)\n for type in ['/train', '/val', 
'/test']:\n if not os.path.isdir(output_folder_name + type):\n os.mkdir(output_folder_name + type)\n for disease in finding_labels:\n if not os.path.isdir(output_folder_name + type + '/' + disease):\n os.mkdir(output_folder_name + type + '/' + disease)", "def _make_dirs(filepath, mode):\n parent = filepath.parent\n if \"w\" in mode and parent:\n os.makedirs(parent, exist_ok=True)", "def merge_folders():\r\n from shutil import copyfile\r\n # Merge all folders into main folder\r\n grp_img_dir = os.listdir('Group_Test_Images')\r\n \r\n for grp_img_folder in grp_img_dir:\r\n image_folders = os.listdir('Group_Test_Images'+'/'+grp_img_folder)\r\n \r\n for img_label in image_folders:\r\n new_directory = 'Group_Test_Images'+'/'+img_label\r\n \r\n try:\r\n os.makedirs(new_directory)\r\n except FileExistsError:\r\n # directory already exists\r\n pass\r\n \r\n file_names = os.listdir('Group_Test_Images'+'/'+grp_img_folder+'/'+img_label)\r\n \r\n for file in file_names:\r\n copyfile('Group_Test_Images'+'/'+grp_img_folder+'/'+img_label+'/'+file, new_directory+'/'+file)", "def create_folders(self):\n for f in self.params['folder_names']:\n if not os.path.exists(f):\n print 'Creating folder:\\t%s' % f\n os.system(\"mkdir %s\" % (f))", "def create_folders(self):\n for f in self.params['folder_names']:\n if not os.path.exists(f):\n print 'Creating folder:\\t%s' % f\n os.system(\"mkdir %s\" % (f))", "def create_folders(self):\n\n for f in self.params['folder_names']:\n if not os.path.exists(f):\n print 'Creating folder:\\t%s' % f\n os.system(\"mkdir %s\" % (f))", "def save_imgs(self):\n print(\"Saving the images with required categories ...\")\n os.makedirs(self.imgs_dir, exist_ok=True)\n # Save the images into a local folder\n for im in tqdm(self.images):\n img_data = requests.get(im['coco_url']).content\n with open(os.path.join(self.imgs_dir, im['file_name']), 'wb') as handler:\n handler.write(img_data)", "def save_images(images, save_dir, image_type):\n for image in images:\n raw_img = urllib2.urlopen(image).read()\n count = len([i for i in os.listdir(save_dir) if image_type in i]) + 1\n f = open(save_dir + '/' + image_type + '_' + str(count), 'wb')\n f.write(raw_img)\n f.close()", "def copy_files_and_create_dirs(files) -> None:\r\n for file in files:\r\n target_dir_name = os.path.dirname(file[1])\r\n\r\n # will create all intermediate-level directories\r\n if not os.path.exists(target_dir_name):\r\n os.makedirs(target_dir_name)\r\n\r\n shutil.copyfile(file[0], file[1])", "async def save_url_images(images):\n for source, image in images:\n name = source.split('/')[-1]\n async with aiofiles.open(f'{OUTPUT_FOLDER}/{name}', 'wb') as f:\n await f.write(image)", "def prepare_folders():\n folder_list = [\"./data\", \"./data/stage\", \"./data/spoken\", \"./data/stage_lemmas\", \"./data/spoken_lemmas\"]\n for folder in folder_list:\n if not os.path.exists(folder):\n os.mkdir(folder)\n print(f\"Created folder {folder}\")\n else:\n print(f\"Folder {folder} already existed\")", "def create_output_folder(self):\n if not os.path.exists(self.current_path):\n os.mkdir(self.current_path)\n data_dir_by_date = datetime.datetime.now().strftime(\n \"data-%d-%b_%H-%M-%S\")\n self.date_path = os.path.join(self.current_path, data_dir_by_date)\n if not os.path.exists(self.date_path):\n os.mkdir(self.date_path)", "def reset_dirs():\n\n image_dir = Config.IMAGE_DIRECTORY\n fig_dir = Config.FIGURE_DIRECTORY\n\n # delete directories\n if os.path.isdir(image_dir):\n shutil.rmtree(image_dir) \n if os.path.isdir(fig_dir):\n 
shutil.rmtree(fig_dir) \n\n # create directories\n os.mkdir(image_dir)\n orig_dir = os.path.join(image_dir, 'original')\n processed_dir = os.path.join(image_dir, 'processed')\n os.mkdir(orig_dir)\n os.mkdir(processed_dir)\n os.mkdir(fig_dir)\n\n print(f'[INFO] Created image and figure directories.')", "def maybe_save_images(images, filenames):\n\n if FLAGS.output_dir is not None:\n batch_size = images.shape[0]\n for i in xrange(batch_size):\n image_array = images[i, :, :]\n file_path = os.path.join(FLAGS.output_dir, filenames[i])\n image = Image.fromarray(np.uint8(image_array))\n image.save(file_path)", "def write_raw(dir_path, image, subfolder_name=\"raw\"):\n\n if dir_path is None:\n return False\n\n raw_dir = os.path.join(dir_path, subfolder_name)\n mkdir_p(raw_dir)\n\n filename = os.path.join(raw_dir, \"%s.jpg\" % datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S_%f\"))\n cv2.imwrite(filename, image)\n\n return True", "def get_and_create_dirs():\n\n print('Checks for paths for original and segmented images.\\nOriginal and segmented images folders must be organized in the exact same structure (images in the same folders and sub-folders).\\n')\n \n orig_path = None\n\n while orig_path is None:\n orig_path = input('Input root path for original images (default github folder):\\n') or r'.\\chest_xray'\n if not os.path.exists(orig_path):\n orig_path = None\n print('Path doesn\\'t exist, please input a valid directory path.\\n')\n\n seg_path = input('\\nInput root path for segmented images if exists or needs to be created (default github folder):\\n') or r'.\\segmentation'\n if (seg_path is not None) and (not os.path.exists(seg_path)):\n create_dir = 'Z'\n while create_dir not in ['Y', 'N']:\n create_dir = input('Path doesn\\'t exist, would you like to create folder structure for ' + seg_path + ' (Y or N)?\\n')\n if create_dir == 'Y':\n for dirname, _, filenames in os.walk(orig_path):\n os.makedirs(dirname.replace(orig_path, seg_path)) \n if os.path.exists(seg_path):\n print('Directory created.')\n else:\n print('Unknown error while attempting to create directory.')\n else:\n print('Directory not created')\n \n orig_file_ext = input('\\nWhat is the file extension for original images (default jpeg) ?\\n').replace('.', '') or 'jpeg'\n \n seg_model = input('\\nWhat is the path to the segmentation model checkpoint (default github folder) ?\\n') or r'.\\Models\\unet_lung_seg.hdf5'\n \n seg_file_ext = input('\\nWhat is the file extension for segmented images (default png) ?\\n').replace('.', '') or 'png'\n \n return orig_path, seg_path, orig_file_ext, seg_model, seg_file_ext", "def save_images(images, db, path):\n images = [int(image) for image in images]\n files = get_img_files(images, db)\n copy_files(files, path)", "def save_step_1(imgs, output_path='./output/step1'):\n # ... 
your code here ...\n i=0\n for each in imgs:\n i+=1\n cv2.imwrite(output_path+\"/output\"+str(i)+\".jpg\", each)", "def copy_database(path_images, path_labels, path_final_images):\n\n try:\n labels = sorted(os.listdir(path_labels))\n except FileNotFoudError:\n print(\"No such file or directory \", path_labels)\n\n try:\n images = sorted(os.listdir(path_images)) #+ \"RetinaNet_I04590/\"))\n except FileNotFoudError:\n print(\"No such file or directory \", path_images)\n\n \"\"\"if not os.path.exists(path_final_images + \"I04590/\"):\n os.mkdir(path_final_images + \"I04590/\")\n\n if not os.path.exists(path_final_images + \"I045135/\"):\n os.mkdir(path_final_images + \"I045135/\")\n\n if not os.path.exists(path_final_images + \"I090135/\"):\n os.mkdir(path_final_images + \"I090135/\")\n\n if not os.path.exists(path_final_images + \"I4590135/\"):\n os.mkdir(path_final_images + \"I4590135/\")\n\n if not os.path.exists(path_final_images + \"Params/\"):\n os.mkdir(path_final_images + \"Params/\")\n\n if not os.path.exists(path_final_images + \"Pauli2/\"):\n os.mkdir(path_final_images + \"Pauli2/\")\n\n if not os.path.exists(path_final_images + \"Pauli3/\"):\n os.mkdir(path_final_images + \"Pauli3/\")\n\n if not os.path.exists(path_final_images + \"Stokes/\"):\n os.mkdir(path_final_images + \"Stokes/\")\n\n if not os.path.exists(path_final_images + \"Rachel/\"):\n os.mkdir(path_final_images + \"Rachel/\")\n\n if not os.path.exists(path_final_images + \"Rachel2/\"):\n os.mkdir(path_final_images + \"Rachel2/\")\"\"\"\n\n for k in range(len(images)):\n if str(k) + \".xml\" in labels:\n copyfile(path_images + \"/\" + images[k],\n path_final_images + \"/\" + images[k])\n \"\"\"copyfile(path_images + \"RetinaNet_I04590/\" + str(k) + \".png\",\n path_final_images + \"I04590/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_I045135/\" + str(k) + \".png\",\n path_final_images + \"I045135/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_I090135/\" + str(k) + \".png\",\n path_final_images + \"I090135/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_I4590135/\" + str(k) + \".png\",\n path_final_images + \"I4590135/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Params/\" + str(k) + \".png\",\n path_final_images + \"Params/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Pauli2/\" + str(k) + \".png\",\n path_final_images + \"Pauli2/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Pauli3/\" + str(k) + \".png\",\n path_final_images + \"Pauli3/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Stokes/\" + str(k) + \".png\",\n path_final_images + \"Stokes/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Rachel/\" + str(k) + \".png\",\n path_final_images + \"Rachel/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Rachel2/\" + str(k) + \".png\",\n path_final_images + \"Rachel2/\" + str(k) + \".png\")\n copyfile(path_labels + str(k) + \".xml\",\n path_final_labels + str(k) + \".xml\")\"\"\"\n print(k)", "def create_folder(folders_to_create=[]):\n for f in folders_to_create:\n if not os.path.exists(f):\n os.makedirs(f)", "def group_image(directory, image, group):\r\n\tif os.path.exists(directory + \"\\\\\" + group):\r\n\t\tpass\r\n\telse:\r\n\t\ttry:\r\n\t\t\tos.mkdir(directory + '\\\\' + group)\r\n\t\t\tprint(\"Successfully created directory\", group)\r\n\t\texcept OSError:\r\n\t\t\tprint(\"Creation of directory failed.\")\r\n\ttry:\r\n\t\tshutil.copy(str(directory + '\\\\' + image), str(directory + \"\\\\\" + 
group + \"\\\\\" + image))\r\n\texcept OSError as OSe:\r\n\t\tprint(OSe)", "def make_empty_directories_linux() -> None:\n mkdir(PICTURES_DIR / 'screenshots' / 'grim')\n mkdir(PICTURES_DIR / 'screenshots' / 'swappy')", "def createDirectories(self):\n # -- LOG\n thepath = os.path.dirname(self.settings.logfile)\n distutils.dir_util.mkpath(thepath)\n\n # -- SESSION \n thepath = self.settings.sessionpath\n distutils.dir_util.mkpath(thepath)\n\n # -- DATABASE\n thepath = self.settings.dbpath\n distutils.dir_util.mkpath(thepath)", "def init_image_info():\n if not os.path.exists(UPLOAD_FOLDER):\n os.makedirs(UPLOAD_FOLDER)", "def main():\n # Create / clean output dir\n if os.path.isdir(OUT_DIR):\n shutil.rmtree(OUT_DIR)\n os.mkdir(OUT_DIR)\n\n # Write all assets to the directory\n for fname, bb in create_assets().items():\n filename = os.path.join(OUT_DIR, fname)\n dirname = os.path.dirname(filename)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n with open(filename, \"wb\") as f:\n f.write(bb)", "def _save_images(self, \n img_urls, \n data_path, \n address):\n\n try:\n # if address is invalid, discontinue the process\n if not address:\n return 0\n\n # this is the path we want the OS to come back\n # when it finishes the image saving tasks\n current_path = os.getcwd()\n os.chdir(data_path)\n \n # create a folder for the apartment if it doesn't\n # exist inside the section folder\n if not os.path.exists(address):\n os.mkdir(address)\n os.chdir(address)\n\n # write images inside the apartment folder\n for i, img_url in enumerate(img_urls):\n browser = self._browser\n browser.get(img_url)\n browser.save_screenshot(f'img{i}.jpg')\n \n os.chdir(current_path)\n return 1\n except:\n os.chdir(current_path)\n return 0", "def _save_images(self, \n img_urls, \n data_path, \n address):\n\n try:\n # if address is invalid, discontinue the process\n if not address:\n return 0\n\n # this is the path we want the OS to come back\n # when it finishes the image saving tasks\n current_path = os.getcwd()\n os.chdir(data_path)\n \n # create a folder for the apartment if it doesn't\n # exist inside the section folder\n if not os.path.exists(address):\n os.mkdir(address)\n os.chdir(address)\n\n # write images inside the apartment folder\n for i, img_url in enumerate(img_urls):\n browser = self._browser\n browser.get(img_url)\n browser.save_screenshot(f'img{i}.jpg')\n \n os.chdir(current_path)\n return 1\n except:\n os.chdir(current_path)\n return 0", "def create_folder(path):\n command = ['mkdir', TEST_DIR]\n file_operation(path, command)", "def _create_directories(self):\n print \"[--init] creating directory structure in %s\" % (self.target_path)\n ensure_path(self.conf_path)\n for subdir in config.PROCESSING_AREAS:\n subdir_path = self.data_path + os.sep + subdir\n ensure_path(subdir_path)", "def write_tmp_imgs(*imgs, **kwargs):\n valid_keys = set((\"create_files\",))\n input_keys = set(kwargs.keys())\n invalid_keys = input_keys - valid_keys\n if len(invalid_keys) > 0:\n raise TypeError(\"%s: unexpected keyword argument(s): %s\" %\n (sys._getframe().f_code.co_name,\n \" \".join(invalid_keys)))\n create_files = kwargs.get(\"create_files\", True)\n\n if create_files:\n filenames = []\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n for img in imgs:\n _, filename = tempfile.mkstemp(prefix=\"nilearn_\",\n suffix=\".nii\",\n dir=None)\n filenames.append(filename)\n img.to_filename(filename)\n\n if len(imgs) == 1:\n yield filenames[0]\n else:\n yield filenames\n\n for filename 
in filenames:\n os.remove(filename)\n else: # No-op\n if len(imgs) == 1:\n yield imgs[0]\n else:\n yield imgs", "def createFolder(self):\n raise NotImplementedError", "def create_dirs():\n\tif os.path.isdir(path):\n\t\tshutil.rmtree(path, ignore_errors=True)\n\tos.makedirs(path+\"/log\",exist_ok=True)\n\tos.makedirs(path+\"/losses\",exist_ok=True) \n\tos.makedirs(path+\"/samples\",exist_ok=True)\n\tos.makedirs(path+\"/model\",exist_ok=True)\n\tos.makedirs(path+\"/datasets\",exist_ok=True)\n\tshutil.copy2(\"config.py\", path+\"/config.py\")\n\tfor i in rconfig[\"datasets\"]:\n\t\tdsconfig = get_dsconfig(i)\n\t\tos.makedirs(path+\"/datasets/\"+dsconfig[\"id\"],exist_ok=True)\n\t\tshutil.copy2(i+\"/dsconfig.py\", path+\"/datasets/\"+dsconfig[\"id\"]+\"/dsconfig.py\")\n\t\tcopytree(dsconfig[\"split\"], path+\"/datasets/\"+dsconfig[\"id\"]+\"/split\")", "def _save_images(self, \n img_urls, \n data_path, \n address):\n\n try:\n # if address is invalid, discontinue the process\n if not address:\n return 0\n\n # this is the path we want the OS to come back\n # when it finishes the image saving tasks\n current_path = os.getcwd()\n os.chdir(data_path)\n \n # create a folder for the apartment if it doesn't\n # exist inside the section folder\n if not os.path.exists(address):\n os.mkdir(address)\n os.chdir(address)\n\n # write images inside the apartment folder\n for i, img_url in enumerate(img_urls):\n img_data = requests.get(img_url).content\n with open(f'img{i}.jpg', 'wb') as handler:\n handler.write(img_data)\n \n os.chdir(current_path)\n return 1\n except:\n os.chdir(current_path)\n return 0", "def storeToHome(userid, name, images, text=None, expire=False):\n if expire:\n clientdir= os.path.join(homedir,userid,'expire',name)\n else:\n clientdir = os.path.join(homedir, userid, name)\n if not os.path.exists(clientdir):\n os.makedirs(clientdir)\n for image in images:\n saveImage(image, clientdir)\n \n if text is not None:\n saveText(text,clientdir)", "def create_dataset_folder_structure():\n\n path = Path(f'{DATASETS}/{FEATURES_DATASET}')\n if not os.path.exists(path):\n print(f'\\nWARNING: The path does not exist. Creating new directory...\\n{path}\\n')\n os.mkdir(path)\n\n try:\n for path in new_sensor_paths:\n if not os.path.exists(path):\n print(f'\\nWARNING: The path does not exist. 
Creating new directory...\\n{path}\\n')\n os.mkdir(path)\n else:\n print(\"\\nPath already exists!\")\n except:\n return False\n else:\n return True", "def process_images():\n create_dirs()\n for root, dirs, files in os.walk(IN):\n for name in files:\n if name[0] == '.':\n continue\n process_image(name)", "def make_directory(scripts):\n if not os.path.exists(os.path.join(os.path.dirname(__file__), 'Uploads')):\n os.makedirs(os.path.join(os.path.dirname(__file__), 'Uploads'))\n for script_object in scripts:\n if script_object.type is None:\n continue\n path = script_object.type.split('::')\n path = os.path.join(os.path.dirname(__file__), \"/\".join(path[:-1]))\n if not os.path.exists(path):\n os.makedirs(path)", "def mymkdir(*folders):\n for folder in folders:\n if not os.path.exists(folder):\n os.mkdir(folder)", "def make_folder(path,folder_names):\n for folder in folder_names:\n if not os.path.exists(os.path.join(path,folder)):\n os.makedirs(os.path.join(path,folder))", "def MakeDestinationDirectories(self, dst_files):\n for dst in dst_files:\n path = os.path.dirname(dst);\n if (len(path) > 0) and (not os.path.exists(path)):\n self.VerboseMsg(\"Make Directory: \" + path)\n if self.execute:\n os.makedirs(path)", "def make_directories(file_path):\n logger.info(\"Create all directories in the path %s\", file_path)\n if not os.path.exists(file_path):\n os.makedirs(file_path, exist_ok=True)\n else:\n logger.warning(\"Cannot create directories %s. The directory already exists\", file_path)", "def preprocess_directory(data_path, label_path, damage_fn):\r\n\r\n file_names = os.listdir(data_path)\r\n os.mkdir(label_path)\r\n\r\n for file_name in file_names:\r\n file_path = data_path + \"/\" + file_name\r\n cur_label_path = label_path + \"/\" + file_name\r\n current_image = Image.open(file_path)\r\n label = damage_fn(current_image)\r\n label.save(cur_label_path, \"JPEG\")", "def save_folder(date_time, sfid, logs_folder, checkpoints_folder):\n date_now = str(date_time.date())\n time_now = str(date_time.time())\n sf = \"saved_models/\" + date_now + \"_\" + time_now + \"_\" \\\n + os.path.basename(__file__).split('.')[0] + '_' + sfid\n if not os.path.isdir(sf):\n os.makedirs(sf)\n\n lf = sf +'/' + logs_folder\n if not os.path.isdir(lf):\n os.makedirs(lf)\n chkf = sf +'/' +checkpoints_folder\n if not os.path.isdir(chkf):\n os.makedirs(chkf)\n\n\n return sf, lf, chkf", "def create_directories():\n directories = ['train', 'test', 'validation']\n\n for directory in directories:\n try:\n os.mkdir(directory)\n except OSError:\n print (f\"Creation of the directory '{directory}' failed\")", "def _crop_write_image(self, inroot, images, outroot):\n for image in images:\n inimage_path = osp.join(inroot, image)\n cvimg = cv2.imread(inimage_path)\n cvimg = cvimg[60:-30, 25:-25]\n h, w, _ = cvimg.shape\n assert h == w == 128\n outimage_path = osp.join(outroot, image)\n cv2.imwrite(outimage_path, cvimg)\n print(outimage_path)", "def exporting_cropped_images (fpath_tiff):\n src = rasterio.open(fpath_tiff, 'r')\n outfolder_irregular = '/train/irregular'\n outfolder_healthy = '/train/healthy'\n outfolder_concrete = '/train/concrete'\n outfolder_incomplete = '/train/incomplete'\n outfolder_other = '/train/other'\n outfolder = '/train/batch'\n #os.makedirs (outfolder, exist_ok = True)", "def _create_layout(root_dir, subsets):\n _create_folder(os.path.join(root_dir, \"images\"))\n _create_folder(os.path.join(root_dir, \"labels\"))\n\n for subset in subsets:\n _create_folder(os.path.join(root_dir, \"images\", subset))\n 
_create_folder(os.path.join(root_dir, \"labels\", subset))", "def _create_folders(tmp_folder: str = None):\n if not os.path.exists(tmp_folder):\n os.makedirs(tmp_folder)\n logging.info(\"Created folder: %s\", tmp_folder)\n\n tmp_user_data = tmp_folder + \"/user-data\"\n if not os.path.exists(tmp_user_data):\n os.makedirs(tmp_user_data)\n logging.info(\"Created folder: %s\", tmp_user_data)\n\n tmp_data_path = tmp_folder + \"/data-path\"\n if not os.path.exists(tmp_data_path):\n os.makedirs(tmp_data_path)\n logging.info(\"Created folder: %s\", tmp_data_path)\n\n tmp_cache_dir = tmp_folder + \"/cache-dir\"\n if not os.path.exists(tmp_cache_dir):\n os.makedirs(tmp_cache_dir)\n logging.info(\"Created folder: %s\", tmp_cache_dir)", "def create_files(project_name, root_dir):\r\n root_dir = projectfolders.create_path(root_dir, project_name) #Modify the root\r\n \r\n write_setup(project_name, root_dir)\r\n write_inits(project_name, root_dir)\r\n write_tests(project_name, root_dir)", "def write_image_to_file_incrementally(image):\r\n i = 0\r\n while os.path.exists(\"sample%s.jpeg\" % i):\r\n i += 1\r\n with open(\"sample%s.jpeg\" % i, \"wb\") as f:\r\n f.write(image)", "def write(self,vname,kmz='out.kmz'):\n\n imgs=[] # to store a list of all images created\n content=[] # the content of the main kml\n vstr='files/%s_%05i.png' # format specification for images (all stored in `files/' subdirectory)\n\n # create empty files subdirectory for output images\n try:\n shutil.rmtree('files')\n except:\n pass\n os.makedirs('files')\n\n # loop through all time slices and create the image data\n # appending to the kml content string for each image\n for i in xrange(0,self.nstep,1):\n kml=ncNWRC(self.filename,istep=i)\n img=vstr % (vname,i)\n imgs.append(img)\n content.append(kml.image2kml(vname,img))\n\n # create the main kml file\n kml=ncNWRC.kmlstr % \\\n {'content':'\\n'.join(content),\\\n 'prog':ncNWRC.progname}\n\n # create a zipfile to store all images + kml into a single compressed file\n z=zipfile.ZipFile(kmz,'w',compression=zipfile.ZIP_DEFLATED)\n z.writestr(kmz[:-3]+'kml',kml)\n for img in imgs:\n z.write(img)\n z.close()", "def create_test_folder(df_test, target_path):\n folder_path = os.path.join(target_path, 'xray_preprocess/test')\n print(f'Create test set at: {folder_path}')\n for _, row in tqdm(df_test.iterrows(), total=df_test.shape[0]):\n if row['class']=='negative':\n destination_path = os.path.join(folder_path, 'negative')\n elif row['class']=='positive':\n destination_path = os.path.join(folder_path, 'positive')\n if not os.path.exists(destination_path):\n os.makedirs(destination_path) \n img = os.path.join(target_path, 'xray', 'test', row['filename'])\n shutil.copy(img, destination_path )" ]
[ "0.71206725", "0.70679843", "0.70667833", "0.69145185", "0.68923235", "0.6808798", "0.680336", "0.67864877", "0.6739885", "0.66490185", "0.65963286", "0.65840054", "0.656958", "0.65321773", "0.6529073", "0.6489769", "0.647365", "0.6456709", "0.642183", "0.641787", "0.64059615", "0.6381406", "0.6368196", "0.6366809", "0.635643", "0.63477075", "0.6346835", "0.63443744", "0.6335922", "0.632685", "0.6324597", "0.6318195", "0.6307423", "0.6302878", "0.6268628", "0.6268512", "0.6266732", "0.6251383", "0.6245306", "0.6218361", "0.6214735", "0.62090456", "0.6200626", "0.6196565", "0.61823094", "0.6178356", "0.6175806", "0.6172727", "0.61674523", "0.6149617", "0.6137046", "0.61140704", "0.6105972", "0.6105972", "0.6095062", "0.6088508", "0.6086697", "0.60828626", "0.6074456", "0.6071429", "0.606919", "0.60680765", "0.6064291", "0.606234", "0.60453117", "0.6036731", "0.6034132", "0.60280627", "0.60146147", "0.600174", "0.59978485", "0.5989082", "0.5968429", "0.5957606", "0.59561884", "0.59561884", "0.5946221", "0.59457886", "0.5937497", "0.5936611", "0.59357727", "0.59199035", "0.5904483", "0.59035194", "0.5900511", "0.58984256", "0.58977264", "0.58955014", "0.589523", "0.5891581", "0.5890103", "0.58869654", "0.5877315", "0.58756906", "0.58736813", "0.58674854", "0.58665156", "0.5864746", "0.5860785", "0.58598983", "0.58570147" ]
0.0
-1
Determine column and row position for filename.
def get_image_column_row(filename):
    row, column = os.path.splitext(filename)[0][-5:].split("_")
    return (int(column) - 1, int(row) - 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_position(self): # maybe encoded in filepath at some point\n result = (self.iter * self.row_step)% self.row_size, self.iter // (self.row_size * self.row_step)* self.col_step\n self.iter += 1\n return result", "def ind(self, pos):\n row = int(pos[1:]) - 1\n column = self.letter_to_column(pos[0])\n return row, column", "def ind(self, pos):\n row = int(pos[1:]) - 1\n column = self.letter_to_column(pos[0])\n return row, column", "def ind(self, pos):\n row = int(pos[1:]) - 1\n column = self.letter_to_column(pos[0])\n return row, column", "def position(file_, pattern):\n pattern = pattern[1:-1]\n pattern = pattern.replace('(', '\\(')\n pattern = pattern.replace(')', '\\)')\n file_obj = open(file_, 'rU')\n for line_number, line in enumerate(file_obj):\n m = re.search(pattern, line)\n if m is not None:\n return line_number, m.pos\n file_obj.close()\n return 0, 0", "def get_position(self, number):\n for rowidx, row in enumerate(self.numbers):\n for colidx, num in enumerate(row):\n if num == number:\n return rowidx, colidx", "def extract_row_and_col_number(self, entry):\n\n row_col_string = entry.split(\"_\")\n row = int(row_col_string[0][1:])\n col = int(row_col_string[1][1:])\n return row, col", "def column_index(input_file, name):\n col, com = find_columns(input_file)\n col_name = name\n contents = open(input_file, 'r').readlines()\n for line in contents:\n if com[col.index(col_name)] in line:\n line_index = contents.index(line)+1\n return line_index", "def position(self) -> Tuple[int, int]:\n return self.row, self.col", "def _errpos(self, fpos):\r\n filename, string = self._includestack[-1]\r\n return filename, srow(string, fpos), scol(string, fpos)", "def get_position(filestring, position):\n lines = filestring.split(\"\\n\")\n line_number, place, count = 0, 0, 0\n #print \"Number of lines: \", len(lines)\n \n while line_number < len(lines):\n line = lines[line_number]\n new_count = count + len(line) #+ 1 # +1 nes dar newline pridedame\n if position <= new_count:\n place = position - count\n break\n count = new_count # +1 nes dar newline pridedame\n line_number += 1\n \n print \"\\n\".join([\"%s:%s\" % ((\"===> \" if i==line_number else \"\") + str(i), lines[i]) for i in xrange(len(lines))])\n return (line_number, place)", "def get_position(self):\n\n return (self._fileobj.tell() - self._pos) * 8 - self._bits", "def part2(filename: str) -> int:\n data = parse(filename)\n return index_of_floor(data, -1)", "def get_coords(self) -> Tuple[int]:\r\n return self.file, self.rank", "def get_pos_index(self):\n return [self.row-1, self.col-1]", "def count_positions(fname):\r\n with open(fname) as f:\r\n for i, l in enumerate(f):\r\n pass\r\n return i + 1", "def pos(self):\n return self.file.tell()", "def get_current_position(self) -> Tuple[int, int]:\n return self.__row_position, self.__col_position", "def cursor_coordinates(self):\n text = self.getText()\n lines = text.split(\"\\n\")\n pos = self.getCursorPos()\n if pos == 0:\n return (0, 0)\n i = 0\n cursor_row = -1\n cursor_col = -1\n for row, line in enumerate(lines):\n i += len(line) + 1 # we need to include \"\\n\"\n if pos < i:\n cursor_row = row\n cursor_col = pos - i + len(line) + 1\n break\n return (cursor_col, cursor_row)", "def _get_header_position(header_row: List[str], column_title: str) -> int:\n for pos, column in enumerate(header_row):\n if column_title.lower() in column.lower():\n return pos\n\n raise Exception(\"Expected column header not found for {}\".format(column_title))", "def get_column(filename, column_name):\n with 
open(filename) as f:\n for header in f:\n columns = header.rstrip().split(\"\\t\")\n return columns.index(column_name)", "def _get_file_info(filename):\n filename = os.path.split(filename)[-1]\n filename = filename[:str.rfind(filename, '.jsonl.gz')]\n _, mode, idx = filename.split('_')\n return mode, idx", "def get_row_col_number(self, index):\n row_num = index // self.spatial_cols\n col_num = index % self.spatial_cols\n return row_num, col_num", "def xFileInfo(filename):\n delim = getDelimiter(filename)\n f = open(filename, 'r')\n reader = csv.reader(f, delimiter=delim)\n num_rows = 0\n for (row_i, row) in enumerate(reader):\n if row_i == 0: #ignore empty strings (e.g. at end of row)\n num_cols = len([val for val in row if val])\n num_rows += 1\n f.close()\n return (num_rows, num_cols)", "def line_col_to_offset(self,\n snapshot: Bug,\n filepath: str,\n line_num: int,\n col_num: int\n ) -> int:\n assert line_num > 0\n assert col_num >= 0\n line_col_s = \"%s/%s[%d:%d]\".format(snapshot.name,\n filepath,\n line_num,\n col_num)\n logger.debug(\"Transforming line-column, '%s', into a character offset\", # noqa: pycodestyle\n line_col_s)\n line_offsets = self._line_offsets(snapshot, filepath)\n line_starts_at = line_offsets[line_num - 1]\n offset = line_starts_at + col_num\n logger.debug(\"Transformed line-column, '%s', into character offset: %s\", # noqa: pycodestyle\n line_col_s,\n offset)\n return offset", "def _get_column_offset(self, lnum, colnum, **opts):\n start, end = self._get_linespan(lnum)\n length = end - start\n cpos = self._col2pos(start, colnum, **opts)\n if cpos < 0 or cpos >= length:\n raise IndexError(\"column out of bounds\")\n\n return start + cpos", "def index_to_position(self, index):\n col = index % self._grid_size\n row = index // self._grid_size\n return row, col", "def find_column(text, index):\n\n last_cr = text.rfind(\"\\n\", 0, index)\n if last_cr < 0:\n last_cr = 0\n column = (index - last_cr) + 1\n return column", "def line_offsets(fname):\n line_offset = []\n offset = 0\n for _, line in enumerate( open(fname) ):\n line_offset.append(offset)\n offset += len(line)\n return line_offset", "def filename_line(skip: int = 2) -> Tuple[str, int]:\n stack = inspect.stack()\n start = skip\n parentframe = stack[start][0]\n\n filename = 'N/A'\n module = inspect.getmodule(parentframe)\n if module:\n filename = os.path.basename(os.path.realpath(module.__file__))\n\n return filename, parentframe.f_lineno", "def parsePosition(self, parse):\n\n if len(parse) == 2:\n ch1 = ord(parse[0].lower())\n ch2 = ord(parse[1].lower())\n\n maxNum = 48 + self.board.size # ascii of max row #\n\n # [Row#][ColLetter]] case\n if 48 < ch1 <= maxNum and 97 <= ch2 < (97 + self.board.size):\n return maxNum - ch1, ch2 - 97 # actual grid indexes of desired position\n\n # [ColLetter][Row#] case\n if 48 < ch2 <= maxNum and 97 <= ch1 < (97 + self.board.size):\n return maxNum - ch2, ch1 - 97 # actual grid indexes of desired position\n return False", "def get_column(self, pos, **opts):\n lnum, cnum = self._get_column(pos, **opts)\n return lnum + self.LINE_NUM_BASE, cnum + self.COLUMN_NUM_BASE", "def _get_image_position(self):\n tile_conf = self.get_tile_configuration()\n image_conf = tile_conf.get(\"image\", None)\n if image_conf:\n return image_conf.get(\"position\", u\"left\")", "def position_helper():\n for file_name in file_list[:1]:\n file_bits = file_splitter(file_name)\n line_length = len(max(file_bits, key=len)) + 13\n index = 0\n print('\\n' + ('-' * line_length))\n for x in file_bits:\n print('Index 
', str(index), ' = ', file_bits[index])\n index += 1\n print(('-' * line_length) + '\\n')", "def _get_file_positions(self,filename):\n if os.path.exists(self._ahfBasename + 'fpos'):\n f = util.open_(self._ahfBasename + 'fpos')\n for i in range(self._nhalos):\n self._halos[i+1].properties['fstart'] = int(f.readline())\n f.close()\n else:\n f = util.open_(filename)\n for h in xrange(self._nhalos):\n if len((f.readline().split())) == 1:\n f.readline()\n self._halos[h+1].properties['fstart'] = f.tell()\n for i in xrange(self._halos[h+1].properties['npart']):\n f.readline()\n f.close()", "def _get_image_index_position(self) :\n \n return self._image_index_position", "def _get_column(self, pos, **opts):\n lnum, cpos = self._get_linepos(pos)\n start, end = self._get_linespan(lnum)\n return lnum, self._pos2col(start, cpos, **opts)", "def _index(orig, off):\n orig_x, orig_y = orig\n off_x, off_y = off\n return (orig_y - off_y) * self.ncols + (orig_x - off_x)", "def get_row_index(self):\n for row in range(self.model.rowCount()):\n name_item = self.model.item(row, self.COL_NAME)\n fullpath = name_item.data(self.ROLE_FULLPATH)\n if fullpath == self.filepath:\n return row", "def get_cxi_file_position(exp_line, exp_name, user_name, process_stage, run_num):\n\n # Construct the file address of the corresponding cxi file\n file_name = '/reg/d/psdm/{}/{}/{}/{}/psocake/r{:0>4d}/{}_{:0>4d}.cxi'.format(exp_line,\n exp_name,\n process_stage,\n user_name,\n run_num,\n exp_name,\n run_num)\n return file_name", "def getCoords(file):\n global demag\n name = file.split('.')[0]\n name = name.split('_')\n x = int(name[2])//demag\n y = int(name[3])//demag\n return(int(x),int(y))", "def parse_pos(self, pos):\r\n\r\n column = ord(pos[0]) - 97\r\n if len(pos) == 2:\r\n row = ord(pos[1]) - 49\r\n else:\r\n row = 9\r\n return [row, column]", "def get_position(self):\n return [self._row, self._column]", "def _get_index(position, file_name, file_type):\n position = position.lower()\n if position not in [\"start\", \"end\"]:\n return \"That position is not available\"\n\n file_type = file_type.lower()\n if file_type not in [\"variant\", \"read\"]:\n return \"That format is not available\"\n \n file_in = 0\n file_path = f\"{LOCAL_FILES_PATH}/{file_name}\"\n if file_type == \"variant\":\n file_in = VariantFile(file_path, \"r\")\n elif file_type == \"read\":\n file_in = AlignmentFile(file_path, \"r\")\n \n # get the required index\n if position == \"start\":\n start = 0\n for rec in file_in.fetch():\n start = rec.pos\n break\n return start\n elif position == \"end\": \n end = 0\n for rec in file_in.fetch():\n end = rec.pos\n return end", "def get_colnumber(self, header):\n for i in range(0, len(self.data)):\n if self.data[i][0] == header:\n return i\n return None", "def record_position(self):\n _rec_pos = self.fd.tell()\n _rec_ind = self._series_index\n return _rec_pos, _rec_ind", "def get_pos(self):\n return [self.row, self.col]", "def get_location(self, filename, position):\r\n f = self.get_file(filename)\r\n\r\n if isinstance(position, int):\r\n return SourceLocation.from_offset(self, f, position)\r\n\r\n return SourceLocation.from_position(self, f, position[0], position[1])", "def get_xy_position(row, col):\n spacing_x = 86 + 11\n spacing_y = 98 + 8\n top_y = 50\n left_x = 50\n return left_x + col * spacing_x, top_y + row * spacing_y", "def _ComputeOffset( contents, line, column ):\n contents = ToBytes( contents )\n current_line = 1\n current_column = 1\n newline = bytes( b'\\n' )[ 0 ]\n for i, byte in enumerate( contents 
):\n if current_line == line and current_column == column:\n return i\n current_column += 1\n if byte == newline:\n current_line += 1\n current_column = 1\n message = COMPUTE_OFFSET_ERROR_MESSAGE.format( line = line,\n column = column )\n _logger.error( message )\n raise RuntimeError( message )", "def getColumnPosition(sheetData, columnName, defaultPosition):\n headerRow = getHeaderRowPosition(sheetData)\n columnRow = sheetData[headerRow]\n try:\n return columnRow.index(columnName)\n except ValueError:\n return defaultPosition", "def find_file_start(chunks, pos):\n\n\tpos = pos - 1\n\twhile pos > 0:\n\n\t\tif chunks[pos][0] != 0x100 and chunks[pos][0] != 0x102:\n\n\t\t\t# This is not a block\n\t\t\treturn pos\n\n\t\telse:\n\t\t\tpos = pos - 1\n\n\treturn pos", "def _col2pos(self, start, colnum, **opts):\n tw = opts.get('tab_width', self.TAB_WIDTH)\n tt = opts.get('tab_type', 'stop')\n if tt == 'fixed':\n\n def advance(p):\n return p + tw\n else:\n\n def advance(p):\n return tw * ((p + tw) // tw)\n\n epos = cpos = 0\n while epos < colnum:\n if self.input[start] == '\\t':\n epos = advance(epos)\n else:\n epos += 1\n start += 1\n cpos += 1\n return cpos - (epos > colnum)", "def get_position_coords(cls):\n row = math.floor(cls.position / cls.size)\n col = cls.position - row * cls.size\n return row, col", "def get_CHR_data_position(handle):\n\n # raw CHR data?\n fileSize = handle.seek(0, 2)\n if fileSize > 0 and fileSize % 256 == 0:\n return (0, fileSize)\n\n # iNES ROM file?\n try:\n iNESInfo = ineslib.parse_iNES_header(handle)\n except ineslib.iNESError as error:\n sys.exit(\n \"The input file is neither a valid iNES ROM file (error: {:s}) nor valid raw CHR data \"\n \"(invalid file size).\".format(str(error))\n )\n if iNESInfo[\"CHRSize\"] == 0:\n sys.exit(\"The iNES file has no CHR ROM.\")\n return (16 + iNESInfo[\"trainerSize\"] + iNESInfo[\"PRGSize\"], iNESInfo[\"CHRSize\"])", "def find_file(line, column, *, cwd=None):\n cwd = cwd or pathlib.Path()\n path = None\n for finder in finders:\n path, lineno = finder(line, column, cwd)\n if path is not None:\n break\n\n if path is None:\n return None, None\n else:\n return path, lineno", "def column_location(self, value):\n\n # Try to use as-is\n try:\n return self._columns.index(value)\n except ValueError:\n pass\n\n # Try as integer index\n try:\n value = int(value)\n\n if value in self._columns:\n location = self._columns.index(value)\n elif value < 0:\n location = value + len(self._columns)\n else:\n location = value\n\n size = len(self._columns)\n if size == 0:\n raise IndexError(\"No columns in table\")\n\n if location >= size:\n raise IndexError(f\"Column ({location}) out of range (0..{size - 1})\")\n\n return location\n except ValueError:\n pass\n\n # No matches\n options = \", \".join(str(col) for col in self._columns)\n raise ValueError(f\"Unknown column name: {value}, current columns: {options}\")", "def find_player_position(labyrinth: Labyrinth) -> Tuple[int, int]:\n for row in range(0, len(labyrinth)):\n for col in range(0, len(labyrinth[0])):\n if labyrinth[row][col] == Labyrinth.START:\n return row, col\n\n # todo: handle exception, if there is no field holding 'S' then something is wrong\n return -1, -1", "def _get_position_grid_column(position, grid_row):\n \n for (box, grid_col_index) in zip(grid_row, range(len(grid_row))):\n if box.contains_point((position.x, position.y)):\n return grid_col_index\n return None", "def _get_charindex(self, x, y):\r\n verts = self.shapes[0].buf[0].vertices\r\n x = x - self.x + verts[2][0]\r\n y = 
y - self.y + verts[0][1]\r\n nv = len(verts)\r\n for i in range(0, nv, 4):\r\n vtr = verts[i] # top right\r\n vbl = verts[i + 2] # bottom left\r\n if x >= vbl[0] and x < vtr[0] and y >= vbl[1] and y < vtr[1]:\r\n i = int(i / 4)\r\n c_i = self.c_lookup[i]\r\n if c_i == (len(self.txt) - 1) or self.c_lookup[i + 1] > c_i + 1:\r\n if (vtr[0] - x) < (x - vbl[0]):\r\n c_i += 1\r\n return c_i\r\n return len(self.txt)", "def player_location(self):\n x = 0\n y = 0\n for line in self.grid:\n for i in line:\n if i == \"P\":\n return x, y\n \n y+=1\n x += 1\n y = 0", "def getHeaderRowPosition(sheetData):\n for index, row in enumerate(sheetData):\n if row[1] != '':\n return index\n return 0", "def location_to_pos(self,row, col):\r\n\r\n pos_row = str(row + 1)\r\n pos_col = chr(col + 97)\r\n return pos_col + pos_row", "def _get_image_location(self):\n\t\timagePointer = self.labels['^IMAGE'].split()\n\t\tif len(imagePointer) == 1:\n\t\t\trecordBytes = int(self.labels['RECORD_BYTES'])\n\t\t\timageLocation = (int(imagePointer[0]) - 1) * recordBytes\n\t\telif len(imagePointer) == 2:\n\t\t\tunits = imagePointer[1]\n\t\t\tif not units == '<BYTES>':\n\t\t\t\terrorMessage = (\"Expected <BYTES> image pointer units but found %s\") % (units)\n\t\t\t\traise ValueError, (errorMessage)\n\t\t\telse:\n\t\t\t\timageLocation = int(imagePointer[0])\n\t\telse:\n\t\t\terrorMessage = (\"^IMAGE contains extra information\") % (imageSampleType)\n\t\t\traise ValueError(errorMessage)\n\t\treturn imageLocation", "def lineno():\n linenum = inspect.currentframe().f_back.f_lineno\n frameinfo = inspect.getframeinfo(inspect.currentframe())\n filename = frameinfo.filename\n return str(\"File: \" + str(filename) + \" Line: \" + str(linenum))", "def which_cell(loc_x, loc_y):\n column = int(math.ceil((loc_x - LEFT_MARGIN) / CELL_SIZE))\n row = int(math.ceil((loc_y - TOP_MARGIN) / CELL_SIZE))\n cell_id = (row - 1) * CELL_COLUMN + column\n return cell_id", "def __convert_position(self, row_position: int = None, col_position: int = None) -> int:\n if row_position is None or col_position is None:\n return self.__row_position * len(self.__labyrinth[0]) + self.__col_position\n\n return row_position * len(self.__labyrinth[0]) + col_position", "def get_coords_from_position(position, file):\n line_counter = 1\n column_counter = 1\n try:\n with open(file, 'r') as source:\n string = source.read()\n except:\n #unable to open file -> 3\n error.ThrowError(3)\n i = 0\n j = position\n while j > 0:\n if string[i] == '\\n':\n line_counter += 1\n column_counter = 1\n else:\n column_counter += 1\n i += 1\n j -= 1\n return Coords(line_counter, column_counter, position)", "def updatePosition(char, position):\n line, col = position\n return (line + 1, 1) if (char == '\\n') else (line, col + 1)", "def index_for_file (self):\n return self.family + '_' + self.filename # index for row of dataframe", "def find_position(self, element):\n for row in range(self.rows):\n for col in range(self.cols):\n if self.data[row][col] == element:\n return row, col\n return None, None", "def find_position(self, element):\n for row in range(self.rows):\n for col in range(self.cols):\n if self.data[row][col] == element:\n return row, col\n return None, None", "def location_of(self, c: str) -> tuple:\n\n c = c.upper()\n if c == 'J': c = 'I'\n\n row = 0\n while row < 5:\n col = self.key[row].find(c)\n\n if col != -1:\n return (row, col)\n\n row += 1\n\n raise ValueError(\"couldn't find letter %r in matrix %r\" % (c, self.key))", "def point2pos(self, point):\n row = 
self._vim.eval('byte2line({})'.format(point))\n col = self._vim.eval('{} - line2byte({})'.format(point, row))\n return (int(row), int(col))", "def GetTileIndex(self, pos):\r\n #pixel = rpg_image.GetPixel(self.image, pos)\r\n try:\r\n pixel = self.image_buffer[pos[0]][pos[1]]\r\n except IndexError, e:\r\n pixel = -1\r\n \r\n return pixel", "def position(self):\n\n return self.scanner.position()", "def get_index(file_name, res):\r\n # Open file in read mode to get indexes of target and replace\r\n with open(file_name + \".txt\") as f:\r\n content = f.readline()\r\n target = res[2].split()[1]\r\n # Placement of column to update\r\n index = content.index(target) // 2\r\n return index", "def editor_column(self) -> int:\n prefix_str = self._lines[self.raw_line - 1][: self.raw_column]\n tab_adjusted_column = len(expand_tabs(prefix_str))\n # Text editors use a one-indexed column, so we need to add one to our\n # zero-indexed column to get a human-readable result.\n return tab_adjusted_column + 1", "def get_curpos(self):\n for i in range(len(self.tree)):\n if self.path == self.tree[i][2]:\n return i\n else:\n return -1", "def position(self, x, y):\n if self.portrait:\n # HMSB\n index = (x + y * self.size[0]) >> 3\n offset = 7 - (x & 0x07)\n else:\n # VMSB\n index = (y >> 3) * self.size[0] + x\n offset = 7 - (y & 0x07)\n return index, offset", "def get_column_offset(self, lnum, colnum):\n return self._get_column_offset(lnum - self.LINE_NUM_BASE,\n colnum - self.COLUMN_NUM_BASE)", "def vim_cursor():\n\n line, nbyte = vim.current.window.cursor\n\n raw_bytes = vim.current.buffer[line-1][:nbyte]\n\n vc = vim.eval(\"&encoding\")\n col = len(raw_bytes.decode(vc))\n return line, col", "def __get_random_player_position(self) -> Tuple[int, int]:\n no_player_position = True\n while no_player_position:\n for row in range(0, self.__labyrinth.labyrinth_height):\n for col in range(0, self.__labyrinth.labyrinth_width):\n if self.__labyrinth[row][col] == Labyrinth.FLOOR and no_player_position:\n self.__row_position = row\n self.__col_position = col\n\n if len(self.__path_to_end()) > self.__labyrinth.labyrinth_width and \\\n len(self.__path_to_end()) > self.__labyrinth.labyrinth_height:\n self.__labyrinth[row][col] = Labyrinth.START\n no_player_position = False\n\n return self.__row_position, self.__col_position", "def get_image_position(\n self, iop: np.ndarray\n ) -> Tuple[float, float, float]:\n try:\n x = self.header[\"Rows\"]\n y = self.header[\"Columns\"]\n raw_ipp = self.header[\"ImagePositionPatient\"]\n pixel_spacing = self.header[\"PixelSpacing\"]\n except KeyError:\n return\n raw_shape = np.array([x, y])\n\n slice_shape = raw_shape / self.size\n translation_fix = (raw_shape - slice_shape) / 2\n Q = np.fliplr(iop) * pixel_spacing\n return raw_ipp + np.dot(Q, translation_fix[:, None]).ravel()", "def get_colour_position(self, **kwargs):\n try:\n colour = kwargs[\"fname\"].split(' ')\n except Exception,e:\n rospy.logerr(\"%s\"%str(e))\n self.mm.neglect()\n return\n if len(colour) > 1:\n colour = colour[1]\n else:\n colour = colour[0]\n self.locator.recognise_grid()\n self.target_locations[colour] = self.locator.find_tetris_block(colour, return_center=True)\n \n self.mm.loadMenu(\"teachMenu\")", "def get_previous_position(self) -> Tuple[int, int]:\n return self.__previous_row_position, self.__previous_col_position", "def handle_instructions(instructions):\n row_instructions = instructions[0:7]\n column_instructions = instructions[7:10]\n row = bisect(row_instructions, (0, 127), \"F\", \"B\")\n column = 
bisect(column_instructions, (0, 7), \"L\", \"R\")\n return row, column", "def convert_coordinates(coordinates):\r\n row = coordinates[1] - 1\r\n column = letters.index(coordinates[0])\r\n return column, row", "def getColIdx(self, col):\n try: \n return int(col)\n except:\n return ord(col)-ord('a')", "def sindex(string, row, col):\r\n n = 0\r\n for _ in range(row-1):\r\n n = string.find('\\n', n) + 1\r\n return n+col-1", "def find_index_column(sheet, name, num):\n for idx in range(1, 26):\n if sheet[chr(idx + 64) + str(num)].value == name:\n index_col = chr(64 + idx)\n break\n return index_col", "def determine_coordinates_and_cell_names(self):\n self.coordinates_and_cell_headers = [\n annot[0]\n for annot in self.file.columns\n if annot[0].lower() in (\"z\", \"y\", \"x\", \"name\")\n ]\n # annotation column names\n self.annot_column_headers = [\n annot\n for annot in self.file.columns\n if annot[0].lower() not in (\"z\", \"y\", \"x\", \"name\")\n ]", "def tell(self) -> int:\n source_cursor = self.tell_source()\n if self.seekable() or self.header is None:\n return source_cursor\n\n elif self.header.tell() < self.header_buffer_size:\n return self.header.tell()\n\n else:\n return source_cursor", "def getRowPosition(sheetData, text, column):\n for index, row in enumerate(sheetData):\n if row[column] == text:\n return index\n raise ValueError('{0} is not in column {1}'.format(text, column))", "def get_file_position(self, mode):\r\n return bass_call_0(BASS_StreamGetFilePosition, self.handle, mode)", "def _get_cursor_index_position(self) :\n return self._cursor_index_position", "def checkFileFormat(self, cellPos):\n try:\n cellPosList = cellPos.split()\n cellXPos = int(cellPosList[0])\n cellYPos = int(cellPosList[1])\n except ValueError:\n messagebox.showerror(\"Error: Wrong format\", \"The choosen file do not have the correct format. Be so kind to choose an other file.\")\n return False\n pass\n\n return (cellXPos, cellYPos)", "def _position_x_to_column(self, x, y):\n col = -1\n if y>self.padding_top and y<self.padding_top+self.len_y_cercles:\n for i in range(self.n_columns):\n if x>self.padding_left+i*63 and x<self.padding_left+i*63+self.diam_cercles:\n col = i+1\n break\n return col", "def getColIdx(self, col):\n try:\n return int(col)\n except:\n return ord(col)-ord('a')", "def get_position(self, row, column):\n position_key = \"{}{}\".format(row, column)\n return self.positions[position_key]" ]
[ "0.6791335", "0.66289556", "0.66289556", "0.66289556", "0.6522248", "0.6411789", "0.63911855", "0.63667697", "0.6281292", "0.62766576", "0.6266177", "0.6249092", "0.6142638", "0.61390686", "0.6129921", "0.60795605", "0.60582995", "0.60221326", "0.6021388", "0.60078406", "0.5974924", "0.59706306", "0.59696424", "0.5965513", "0.59258085", "0.590232", "0.58866423", "0.58864385", "0.586704", "0.585859", "0.585775", "0.5855617", "0.58474046", "0.5838424", "0.58375406", "0.583373", "0.5829399", "0.5822627", "0.5813991", "0.5809405", "0.58005834", "0.5791035", "0.576392", "0.57623297", "0.575483", "0.5740917", "0.5685418", "0.56672376", "0.5667156", "0.565132", "0.5623177", "0.56182194", "0.55969614", "0.55887973", "0.55860066", "0.5585801", "0.557014", "0.5564164", "0.55590874", "0.5555693", "0.55504555", "0.5545573", "0.5540686", "0.5538178", "0.55228674", "0.55166006", "0.55076456", "0.5501116", "0.5497633", "0.5482697", "0.5482314", "0.5482314", "0.5480863", "0.54781175", "0.5458593", "0.54479045", "0.54464746", "0.5444988", "0.54435897", "0.54248613", "0.54219973", "0.5417826", "0.5413413", "0.54038787", "0.53977555", "0.53931016", "0.5382303", "0.53773284", "0.5375496", "0.53632516", "0.5357387", "0.53560233", "0.5353795", "0.5352478", "0.53498083", "0.5348449", "0.5344165", "0.5335735", "0.53334624", "0.5331072" ]
0.75118536
0
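A minimal usage sketch for the get_image_column_row document above, assuming tile filenames end with a two-digit row and column suffix; the name slice_02_03.png is a hypothetical example chosen to match the function's slicing of the last five stem characters.

import os

def get_image_column_row(filename):
    # Repeated from the record above so the sketch runs on its own.
    row, column = os.path.splitext(filename)[0][-5:].split("_")
    return (int(column) - 1, int(row) - 1)

# "slice_02_03.png" -> stem "slice_02_03" -> last five characters "02_03"
# row string "02", column string "03" -> zero-based (column, row) = (2, 1)
print(get_image_column_row("slice_02_03.png"))  # prints (2, 1)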
Open all images in a directory. Return tuple of Tile instances.
def open_images_in(directory):
    files = [
        filename
        for filename in os.listdir(directory)
        if "_" in filename and not filename.startswith("joined")
    ]
    tiles = []
    if len(files) > 0:
        i = 0
        for file in files:
            pos = get_image_column_row(file)
            im = Image.open(os.path.join(directory, file))
            position_xy = [0, 0]
            count = 0
            for a, b in zip(pos, im.size):
                position_xy[count] = a * b
                count = count + 1
            tiles.append(
                Tile(
                    image=im,
                    position=pos,
                    number=i + 1,
                    coords=position_xy,
                    filename=file,
                )
            )
            i = i + 1
    return tiles
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_pic_in_directory(directory):\n return [Image.open(os.path.join(directory, img)) for img in os.listdir(directory)]", "def get_images(directory=None): #import from mask.py\n \n if directory == None:\n directory = os.getcwd() # Use working directory if unspecified\n \n image_list = [] # Initialize aggregaotrs\n file_list = []\n \n directory_list = os.listdir(directory) # Get list of files\n for entry in directory_list:\n absolute_filename = os.path.join(directory, entry)\n try:\n image = PIL.Image.open(absolute_filename)\n file_list += [entry]\n image_list += [image]\n except IOError:\n pass # do nothing with errors tying to open non-images\n return image_list, file_list", "def get_images(directory=None):\n \n if directory == None:\n directory = os.getcwd() # Use working directory if unspecified\n \n image_list = [] # Initialize aggregaotrs\n file_list = []\n \n directory_list = os.listdir(directory) # Get list of files\n for entry in directory_list:\n absolute_filename = os.path.join(directory, entry)\n try:\n image = PIL.Image.open(absolute_filename)\n file_list += [entry]\n image_list += [image]\n except IOError:\n pass # do nothing with errors tying to open non-images\n return image_list, file_list", "def get_images(directory=None):\r\n \r\n if directory == None:\r\n directory = os.getcwd() # Use working directory if unspecified\r\n \r\n image_list = [] # Initialize aggregaotrs\r\n file_list = []\r\n \r\n directory_list = os.listdir(directory) # Get list of files\r\n for entry in directory_list:\r\n if len(file_list)<2:\r\n absolute_filename = os.path.join(directory, entry)\r\n try:\r\n image = PIL.Image.open(absolute_filename)\r\n file_list += [entry]\r\n image_list += [image]\r\n except IOError:\r\n pass # do nothing with errors tying to open non-images\r\n return image_list, file_list", "def load_images(self, folder):\n cwd = os.getcwd()\n dir = cwd + '/' + folder\n files = os.listdir(dir)\n for file in files:\n img = pygame.image.load(dir + '/' + file)\n self.images.append(img)", "def load_images_from_folder(folder):\n images = []\n for filename in os.listdir(folder):\n img = Image.open(os.path.join(folder,filename))\n images.append(img)\n return images", "def open(*args, **kwargs):\n return MultiFileTileSource(*args, **kwargs)", "def image_iter() -> iter:\r\n return ('Images/' + image for image in IMAGES)", "def load_images(self, image_paths):\n \n fill_list = []\n \n for idx in tqdm(range(len(image_paths))):\n path = image_paths[idx]\n yield cv2.imread(path)", "def readTiles(self):\n TileImage = Image.open(self.Filename).convert(\"RGB\")\n TileIW, TileIH = TileImage.size\n TilesetW, TilesetH = TileIW // self.TileWidth, TileIH // self.TileHeight\n\n for y in range(TilesetH):\n for x in range(TilesetW):\n box = self.TileWidth * x, self.TileHeight * y, self.TileWidth * (x+1), self.TileHeight * (y+1)\n tile = TileImage.crop(box)\n self.List.append(tile)\n\n str = tile.tostring()\n if not str in self.TileDict:\n #print(\"add tile: \", str)\n self.TileDict[str] = len(self.List) - 1\n print(\"tile count: {}, unique count: {}\".format(len(self.List),len(self.TileDict.values())))", "def get_tiles():\n\t\t\n\tcursor = get_cursor()\n\t\n\tcursor.execute(\"SELECT * FROM fitmeimages ORDER BY shade ASC, id ASC\")\n\treturn cursor.fetchall();", "def get_images(self, file_path: str) -> Iterable[Image]:\n return []", "def get_existing_images(directory):\n validate_directory(directory)\n directory += '/'\n try:\n return listdir(directory)\n except:\n mkdir(directory)\n return []", "def 
load_images_from_directory(input_dir, batch_shape):\n def input_filenames(input_dir):\n all_files = tf.gfile.Glob(os.path.join(input_dir, '*.png'))\n all_files.sort()\n return all_files\n\n\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n\n for filepath in input_filenames(input_dir):\n with tf.gfile.Open(filepath, mode='rb') as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n\n # This is a partial batch left over at end.\n # Note that images will still have the proper size.\n if idx > 0:\n yield filenames, images", "def getImages(path):\n files = list()\n\n for f in listdir(path):\n file = join(path, f)\n if isfile(file):\n files.append(getImage(file))\n\n return files", "def im_open(path):\n\n try:\n assert os.path.isdir(path)\n #get file list in directory - glob includes full path\n files = sorted(glob.glob('{}{}*'.format(path,os.sep)), key=sort_key) \n #load the collection\n raw_stack = io.imread_collection(files)\n #turn the collection into a np array and remove extraneous OCT portion from 1025:1083 on x axis. (z,y,x)\n #if .bmp files are open (from pv-oct), the slicing will not affect them, the x-axis is only 540 pixels.\n stack = io.collection.concatenate_images(raw_stack)[:,:,0:1024]\n \n return stack\n\n except AssertionError:\n sys.exit(\"A non-directory object was given to the __open__ function\")", "def open(*args, **kwargs):\n return TiffFileTileSource(*args, **kwargs)", "def readImages(image_dir):\n extensions = ['bmp', 'pbm', 'pgm', 'ppm', 'sr', 'ras', 'jpeg',\n 'jpg', 'jpe', 'jp2', 'tiff', 'tif', 'png']\n\n search_paths = [os.path.join(image_dir, '*.' 
+ ext) for ext in extensions]\n image_files = sorted(sum(map(glob, search_paths), []))\n images = [cv2.imread(f, cv2.IMREAD_UNCHANGED | cv2.IMREAD_COLOR) for f in image_files]\n\n bad_read = any([img is None for img in images])\n if bad_read:\n raise RuntimeError(\n \"Reading one or more files in {} failed - aborting.\"\n .format(image_dir))\n\n return images", "def load_images(self):\n for image in self.gltf.images:\n self.images.append(image.load(self.path.parent))", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 1.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def read_local(path):\n files = os.listdir(path)\n imgs = []\n for f in files:\n if f.endswith(\".tiff\") or f.endswith(\".tif\"):\n img = Image.open(os.path.join(path, f))\n imgs.append(np.array(img))\n return imgs", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n # all_files = tf.gfile.Glob(os.path.join(input_dir, '*.png'))\n # test_files = [all_files[idx] for x in np.random.choice(len(all_files), 200, replace=False)]\n # for filepath in test_files:\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in 
tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_sprites(dir=\"/home/robin/workspace/python/ipt/chess/sprites\"):\n arr = []\n chdir(dir)\n for i in range(12):\n img = mimg.imread(\"sprite_\"+\"{:0>2d}\".format(i)+\".png\")\n arr.append(img)\n return arr", "def load_images(self, files, sub_dir):\n\n for f in files:\n self.images.append(Image(f, sub_dir))", "def load_images(pool, entries):\n start = time.perf_counter()\n images = pool.map(ski.io.imread, [x.path for x in entries])\n logger.info(\"Loaded %i images:\", len(images))\n util.pprint_log([x.name for x in entries], logger.info)\n logger.info(util.elapsed(start))\n logger.info(\"\\n\")\n return images", "def load_images(folder_path, num_images):\n imgs = np.zeros(shape=[num_images, 400, 400, 3])\n for i in range(1, num_images + 1):\n image_name = \"satImage_%.3d\" % i\n image_path = folder_path + image_name + \".png\"\n if os.path.isfile(image_path):\n print('Loading ' + image_path)\n img = mpimg.imread(image_path)\n\n #imgs[i - 1] = np.asarray(img).reshape(400, 400, 3)\n imgs[i - 1] = img.reshape(400, 400, 3)\n else:\n print('File ' + image_path + ' does not exist')\n return imgs", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n existing_dirs = [os.path.basename(dir) for dir in os.listdir(FLAGS.output_dir)]\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.JPEG')):\n with tf.gfile.Open(filepath, 'rb') as f:\n image = np.array(Image.open(f).resize([FLAGS.image_height, FLAGS.image_width]).convert('RGB')).astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n if os.path.basename(os.path.normpath(input_dir))=='*':\n head, tail = os.path.split(filepath)\n dirname=os.path.basename(head)\n if dirname in existing_dirs:\n continue\n filename = os.path.join(dirname, tail)\n else:\n filename = os.path.basename(filepath)\n filenames.append(filename)\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_images(files, open_fn=None):\n if open_fn is None:\n import cv2\n open_fn = cv2.imread\n images = list()\n for _file in files:\n images.append(np.asarray(open_fn(_file)))\n return images", "def get_all_images_from_filesystem():\r\n\r\n logging.debug('get_all_images_from_filesystem()')\r\n\r\n dir_path = os.path.join(os.environ['TEMP'],'WarietyWallpaperImages')\r\n all_full_image_paths = []\r\n for my_file in os.listdir(dir_path):\r\n if os.path.isfile(os.path.join(dir_path, my_file)):\r\n all_full_image_paths.append(os.path.join(dir_path, my_file))\r\n return all_full_image_paths", "def show_files(file_locations):\n for file_loc in file_locations:\n show_image(file_loc)", "def instances(self):\n for d in os.listdir(self.directory):\n yield self.instance(self.directory, d)", "def load(self, image_loader):\n self._image_loader = image_loader\n for tile_set in self.tile_sets:\n # do images first, because tiles 
could reference it\n for img in tile_set.images:\n if img.source:\n self._load_image_from_source(tile_set, img)\n else:\n tile_set.indexed_images[img.id] = self._load_image(img)\n # tiles\n for tile in tile_set.tiles:\n for img in tile.images:\n if not img.content and not img.source:\n # only image id set\n indexed_img = tile_set.indexed_images[img.id]\n self.indexed_tiles[int(tile_set.firstgid) + int(tile.id)] = (0, 0, indexed_img)\n else:\n if img.source:\n self._load_image_from_source(tile_set, img)\n else:\n indexed_img = self._load_image(img)\n self.indexed_tiles[int(tile_set.firstgid) + int(tile.id)] = (0, 0, indexed_img)", "def load(self, image_loader):\n self._image_loader = image_loader\n for tile_set in self.tile_sets:\n # do images first, because tiles could reference it\n for img in tile_set.images:\n if img.source:\n self._load_image_from_source(tile_set, img)\n else:\n tile_set.indexed_images[img.id] = self._load_image(img)\n # tiles\n for tile in tile_set.tiles:\n for img in tile.images:\n if not img.content and not img.source:\n # only image id set\n indexed_img = tile_set.indexed_images[img.id]\n self.indexed_tiles[int(tile_set.firstgid) + int(tile.id)] = (0, 0, indexed_img)\n else:\n if img.source:\n self._load_image_from_source(tile_set, img)\n else:\n indexed_img = self._load_image(img)\n self.indexed_tiles[int(tile_set.firstgid) + int(tile.id)] = (0, 0, indexed_img)", "def read_images(folder):\n distinct_frames = DistinctFrames()\n\n for file in sorted(sorted(os.listdir(folder)),\n key=len): # sorting files on basis of 1) length and 2) numerical order\n '''\n Sorting is done 2 times because\n if files in the folder are\n 1. image100.pkl\n 2. image22.pkl\n 3. image21.pkl\n firstly sort them to image100.pkl,image21.pkl,image22.pkl then according to length to image21.pkl,image22.pkl,image100.pkl\n '''\n try:\n img_obj = load_from_memory(file, folder)\n time_stamp = img_obj.get_time()\n distinct_frames.add_img_obj(img_obj)\n print(\"Reading image ..\" + str(time_stamp) + \" from \" + folder) # for debug purpose\n except:\n # exception will occur for files like .DS_Store and jpg directory\n continue\n\n if distinct_frames.no_of_frames() != 0:\n distinct_frames.calculate_time()\n\n return distinct_frames", "def get_images(self, start_at=None, count=None):\n start_at = 0 if start_at is None else start_at\n end_at = len(self.fps) if count is None else start_at+count\n for fp in self.fps[start_at:end_at]:\n try:\n image = ndimage.imread(fp, mode=\"RGB\")\n except IOError as exc:\n image = None\n yield image", "def load_images(subdir):\n with perform(\n name='dbutils load_images',\n before='Loading images to gallery',\n fail='Error occured while loading images to gallery',\n after='Images succesfully loaded'\n ):\n load_dummy_images(subdir)", "def load_images(filename):\n images = _load(filename)\n #_info_image(image, title=os.path.basename(filename))\n return images", "def _initWithTiffTools(self): # noqa\n dir0 = TiledTiffDirectory(self._largeImagePath, 0)\n self.tileWidth = dir0.tileWidth\n self.tileHeight = dir0.tileHeight\n self.sizeX = dir0.imageWidth\n self.sizeY = dir0.imageHeight\n self.levels = max(1, int(math.ceil(math.log(max(\n dir0.imageWidth / dir0.tileWidth,\n dir0.imageHeight / dir0.tileHeight)) / math.log(2))) + 1)\n sampleformat = dir0._tiffInfo.get('sampleformat')\n bitspersample = dir0._tiffInfo.get('bitspersample')\n self._dtype = np.dtype('%s%d' % (\n tifftools.constants.SampleFormat[sampleformat or 1].name,\n bitspersample,\n ))\n self._bandCount = 
dir0._tiffInfo.get('samplesperpixel')\n info = _cached_read_tiff(self._largeImagePath)\n frames = []\n associated = [] # for now, a list of directories\n curframe = -1\n for idx, ifd in enumerate(info['ifds']):\n # if not tiles, add to associated images\n if tifftools.Tag.tileWidth.value not in ifd['tags']:\n associated.append(idx)\n continue\n level = self._levelFromIfd(ifd, info['ifds'][0])\n # if the same resolution as the main image, add a frame\n if level == self.levels - 1:\n curframe += 1\n frames.append({'dirs': [None] * self.levels})\n frames[-1]['dirs'][-1] = (idx, 0)\n try:\n frameMetadata = json.loads(\n ifd['tags'][tifftools.Tag.ImageDescription.value]['data'])\n for key in {'channels', 'frame'}:\n if key in frameMetadata:\n frames[-1][key] = frameMetadata[key]\n except Exception:\n pass\n if tifftools.Tag.ICCProfile.value in ifd['tags']:\n if not hasattr(self, '_iccprofiles'):\n self._iccprofiles = []\n while len(self._iccprofiles) < len(frames) - 1:\n self._iccprofiles.append(None)\n self._iccprofiles.append(ifd['tags'][\n tifftools.Tag.ICCProfile.value]['data'])\n # otherwise, add to the first frame missing that level\n elif level < self.levels - 1 and any(\n frame for frame in frames if frame['dirs'][level] is None):\n frames[next(\n idx for idx, frame in enumerate(frames) if frame['dirs'][level] is None\n )]['dirs'][level] = (idx, 0)\n else:\n msg = 'Tile layers are in a surprising order'\n raise TileSourceError(msg)\n # if there are sub ifds, add them\n if tifftools.Tag.SubIfd.value in ifd['tags']:\n for subidx, subifds in enumerate(ifd['tags'][tifftools.Tag.SubIfd.value]['ifds']):\n if len(subifds) != 1:\n msg = 'When stored in subifds, each subifd should be a single ifd.'\n raise TileSourceError(msg)\n level = self._levelFromIfd(subifds[0], info['ifds'][0])\n if level < self.levels - 1 and frames[-1]['dirs'][level] is None:\n frames[-1]['dirs'][level] = (idx, subidx + 1)\n else:\n msg = 'Tile layers are in a surprising order'\n raise TileSourceError(msg)\n self._associatedImages = {}\n for dirNum in associated:\n self._addAssociatedImage(self._largeImagePath, dirNum)\n self._frames = frames\n self._tiffDirectories = [\n TiledTiffDirectory(\n self._largeImagePath,\n frames[0]['dirs'][idx][0],\n subDirectoryNum=frames[0]['dirs'][idx][1])\n if frames[0]['dirs'][idx] is not None else None\n for idx in range(self.levels - 1)]\n self._tiffDirectories.append(dir0)\n self._checkForInefficientDirectories()\n self._checkForVendorSpecificTags()\n return True", "def load_from_folder(path):\n images = []\n files = os.listdir(path)\n files.sort()\n for file in tqdm(files):\n images.append(io.imread(path + file))\n return images", "def _locate_images(self):\r\n extensions = '|'.join(self.valid_extensions)\r\n extension_re = re.compile('.+\\.(%s)$' % extensions, re.IGNORECASE)\r\n files = sorted(os.listdir(self.path))\r\n\r\n images = []\r\n for root, dirs, files in os.walk(self.path, followlinks=self.config['follow_links']):\r\n for filename in sorted(files):\r\n if not filename.startswith('.') and extension_re.match(filename):\r\n images.append(Image(path=os.path.join(root, filename), config=self.config))\r\n if not self.config['recursive']:\r\n break\r\n\r\n if not images:\r\n raise SourceImagesNotFoundError(self.path)\r\n\r\n images = sorted(images, reverse=self.config['algorithm_ordering'][0] != '-')\r\n\r\n return images", "def load_images(self):\n self.img_paths = sorted(glob(self.img_pattern))\n self.imgs = []\n for idx, this_path in enumerate(self.img_paths):\n try:\n 
this_img = cv2.imread(this_path)\n if self.downscale > 1:\n this_img = cv2.resize(this_img, (0, 0),\n fx=1/float(self.downscale),\n fy=1/float(self.downscale),\n interpolation=cv2.INTER_LINEAR)\n except Exception as e:\n print(\"error loading img: %s\" % (this_path))\n if this_img is not None:\n self.imgs.append(this_img)\n print(\"loaded img %d size=(%d,%d): %s\" %\n (idx, this_img.shape[0], this_img.shape[1], this_path))\n print(\"loaded %d images\" % (len(self.imgs)))", "def display_imgs(img_dir,img_list):\n for img in img_list:\n display_img(img_dir, img)", "def load_images(path):\n images = []\n images_names = []\n \n for file_name in os.listdir(path):\n image_name = file_name\n images_names.append(image_name)\n images_names = sorted(images_names) #use sort to insure linux file sys behaves\n print(images_names) #check for proper order\n\n for file_name in images_names:\n image = pygame.image.load(path + os.sep + file_name).convert()\n images.append(image)\n return images", "def load_images(filelist):\n # pixel value range 0-255\n if not isinstance(filelist, list):\n im = Image.open(filelist).convert('L')\n return np.array(im).reshape(1, im.size[1], im.size[0], 1)\n data = []\n for file in filelist:\n im = Image.open(file).convert('L')\n data.append(np.array(im).reshape(1, im.size[1], im.size[0], 1))\n return data", "def get_image_list(source_dir):\n\n dir_list = os.path.os.listdir(source_dir)\n# print(dir_list)\n image_list = []\n os.chdir(source_dir)\n for file in dir_list:\n print(\"Inspecting.... : {}\".format(file))\n\n try:\n if Image.open(file).format:\n image_list.append(file)\n print(\"{} : is an image\".format(file))\n except Exception as e:\n print(\"{} : failed the imageness test.i \\n {}\".format(file, e))\n continue\n\n# print(image_list)\n return image_list", "def image_tiles(bqsession, image_service_url, tile_size=64):\n dims = bqsession.fetchxml(image_service_url, dims='')\n x = int(dims.xpath('//tag[@name=\"image_num_x\"]')[0].attrib[ 'value'])\n y = int(dims.xpath('//tag[@name=\"image_num_y\"]')[0].attrib[ 'value'])\n \n for ix in range(int(x/tile_size)-1):\n for iy in range(int(y/tile_size)-1):\n yield bqsession.c.prepare_url(image_service_url, tile='0,%s,%s,%s' % (str(ix), str(iy), str(tile_size)))", "def process(directory):\n files = []\n\n options = [\"Load\", \"Create\"]\n choice = options[int(ui.prompt(options=options))]\n\n for item in os.listdir(directory):\n if os.path.isfile(os.path.join(directory, item)):\n filename = os.path.join(directory, item)\n if choice == \"Load\" and item.endswith(\".png\"):\n files.append(filename)\n elif choice == \"Create\" and item.endswith(\".file\"):\n files.append(filename)\n\n filenames, pageNames = imagePages(files, choice)\n \n targets = [name.split('/')[-1][:5] for name in filenames]\n return pageNames, targets, filenames", "def load_images(self, tmx):\n for image_data in tmx.images:\n if image_data:\n image, _, _ = image_data\n self.load_image(image)", "def extract_from_dir(directory):\n image_regex = re.compile(r'.+\\.jpeg$')\n for root, _, files in os.walk(directory):\n for name in files:\n if image_regex.match(name) != None:\n filename = os.path.join(root, name)\n image = io.imread(filename)\n no_ext, _ = os.path.splitext(name)\n features = extract(image)\n yield (no_ext, features)", "def get_images(path_list):\n images = []\n labels = []\n names = []\n i = 0\n for path in path_list:\n for fruit_dir_path in glob.glob(path):\n fruit_label = fruit_dir_path.split(\"/\")[-1]\n for image_path in 
glob.glob(os.path.join(fruit_dir_path, \"*.jpg\")):\n image = cv2.imread(image_path, cv2.IMREAD_COLOR)\n\n image = cv2.resize(image, (45, 45))\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n\n images.append(image)\n names.append(fruit_label)\n labels.append(i)\n i += 1\n\n images = np.array(images)\n print(images.shape)\n # add a new dimension here\n with np.nditer(images, op_flags=['readwrite']) as it:\n for x in it:\n x = np.expand_dims(x, axis=0)\n labels = np.array(labels)\n return images, labels, i", "def get_images(image_folder_root, image_label_list):\n file_dcm=[]\n X = []\n y = []\n for file_name,label in image_label_list:\n try:\n current_file = pydicom.dcmread(image_folder_root + file_name + '.dcm')\n pixel_array = current_file.pixel_array\n if (pixel_array.shape != (512,512)):\n continue\n file_dcm.append((file_name,label,brain_window(current_file)))\n y.append(label)\n X.append(pydicom.dcmread(image_folder_root + file_name + '.dcm').pixel_array)\n except ValueError:\n continue\n return X,y", "def _getImagesFromDirectory(self, directoryPath):\n files = [f for f in listdir(directoryPath)\n if isfile(join(directoryPath, f))]\n for filePath in files:\n self._imageDictionary[filePath] = image.load(\n self._formatPath(directoryPath, filePath))", "def load_data(data_dir):\n\n # Initiate lists\n images = []\n labels = []\n\n main_dir = os.path.abspath(os.curdir)\n\n for i in range(NUM_CATEGORIES):\n os.chdir(os.path.join(data_dir, str(i))) # Open directory i\n dir_images = os.listdir() # Create a list of all images in directory\n\n for j in range(len(dir_images)):\n image = cv2.imread(dir_images[j]) # Read image from file\n image = tf.keras.preprocessing.image.img_to_array(image) # Transform image to numpy array\n image = tf.image.resize(image, (IMG_WIDTH, IMG_HEIGHT)) # Reshape image to 30 x 30 px\n image = image/255 # Normalize image RGB values\n images.append(image) \n labels.append(i)\n\n os.chdir(main_dir)\n \n return (images, labels)", "def all_loci():\n for fname in listdir(join(DATA_PATH, 'loci')):\n try:\n yield fetch_locus(fname)\n except Exception as e:\n print(f'{repr(e)} fetching {fname}')", "def get_img_files(images, db):\n img_dir = db.source\n if img_dir == None:\n raise ValueError('Cannot locate file without a base path. This method looks for it at \\\n db.source, which is not set. 
This should be set by the loader during DB construction!')\n img_dir = path.join(img_dir, 'img') \n locs = db.get_img_locs(images)\n titles = db.get_location_titles()\n returnval = []\n for image in images:\n loc = locs[image]\n if loc is None:\n raise ValueError('The image %s could not be found' % image)\n returnval.append(path.join(img_dir, titles[loc], str(image) + '.jpg'))\n return returnval", "def _iter_images(self):\n for image in self._images:\n yield image", "def display_images(filenames):\n for filename in filenames:\n display(Image(filename))", "def return_images(directory):\r\n allfiles = os.listdir(directory)\r\n image_list = [im for im in allfiles if '.jpg' in str(im)]\r\n image_list = [directory + im for im in image_list]\r\n return image_list", "def get_images(self):\n return [self.get_image(i) for i in range(0, self.number_sprites - 1)]", "def load(self, dirname):\n loaded_filenames = set()\n ini_filename = os.path.join(dirname, \"xpresser.ini\")\n if os.path.exists(ini_filename):\n config = ConfigParser.ConfigParser()\n config.read(ini_filename)\n for section_name in config.sections():\n if section_name.startswith(\"image \"):\n image_name = section_name.split(None, 1)[1]\n try:\n image_filename = config.get(section_name, \"filename\")\n except ConfigParser.NoOptionError:\n raise ImageDirError(\"Image %s missing filename option\"\n % image_name)\n image_filename = os.path.join(dirname, image_filename)\n if not os.path.exists(image_filename):\n raise ImageDirError(\"Image %s file not found: %s\" %\n (image_name, image_filename))\n try:\n image_similarity = config.getfloat(section_name,\n \"similarity\")\n except ConfigParser.NoOptionError:\n image_similarity = None\n except ValueError:\n value = config.get(section_name, \"similarity\")\n raise ImageDirError(\"Image %s has bad similarity: %s\"\n % (image_name, value))\n \n try:\n value = config.get(section_name, \"focus_delta\")\n match = CLICK_POSITION_RE.match(value)\n if not match:\n raise ImageDirError(\"Image %s has invalid click \"\n \"position: %s\" %\n (image_name, value))\n image_focus_delta = (int(match.group(\"x\")),\n int(match.group(\"y\")))\n except ConfigParser.NoOptionError:\n image_focus_delta = None\n image = Image(name=image_name,\n filename=image_filename,\n similarity=image_similarity,\n focus_delta=image_focus_delta)\n self._images[image_name] = image\n loaded_filenames.add(image_filename)\n\n # Load any other images implicitly with the default arguments.\n for basename in os.listdir(dirname):\n filename = os.path.join(dirname, basename)\n if filename not in loaded_filenames:\n ftype, fencoding = mimetypes.guess_type(filename)\n if ftype and ftype.startswith(\"image/\"):\n image_name = os.path.splitext(basename)[0]\n self._images[image_name] = Image(name=image_name,\n filename=filename)", "def load_images(file):\n\timage_list = [] # List for storing all the images\n\ttargets = []\n\t\n\tfor filename in glob.glob(file + '/*.png'):\n\t\t# ==================\n\t\t# Reading the image\n\t\t# ==================\n\t\timage = scipy.misc.imread(filename).astype(np.float32)\n\t\t\n\t\t# ================================\n\t\t# Converting the image to a vector\n\t\t# ================================\n\t\timage = image.flatten() # (784, )\n\t\t\n\t\t# ==============================\n\t\t# Normalizing the image to numpy\n\t\t# ==============================\n\t\timage = image / 255.0\n\t\timage = image - 0.5\n\t\timage = image * 2.0\n\t\t\n\t\t# ===============================\n\t\t# Appending the image to the 
list\n\t\t# ===============================\n\t\timage_list.append(image)\n\t\t\n\t\t_, value = filename.split('\\\\')\n\t\t# print(value[0])\n\t\ttargets.append(int(value[0]))\n\t\n\timage_list = np.array(image_list)\n\ttargets = np.array(targets)\n\t\n\t# ================================================\n\t# \t\t\tShuffling the data\n\t# ================================================\n\timage_list, targets = shuffle(image_list, targets)\n\t\n\ttrain_images, test_images, train_targets, test_targets = split(image_list, targets)\n\treturn train_images, test_images, train_targets, test_targets", "def list_images(path=['.']):\n for image_dir in set(path):\n if not os.path.isdir(image_dir):\n continue\n for filename in os.listdir(image_dir):\n bname, ext = os.path.splitext(filename)\n if ext.lower() not in VALID_IMAGE_EXTS:\n continue\n\n filepath = os.path.join(image_dir, filename)\n yield strutils.decode(filepath)", "def get_images_of_folder(folder):\n\n Settings.dev_print(\"getting images of folder: {}\".format(folder.get_title()))\n if not folder: return []\n imgs = []\n files = []\n valid_images = [\".jpg\",\".gif\",\".png\",\".tga\",\".jpeg\"]\n for f in os.listdir(folder.get_path()):\n ext = os.path.splitext(f)[1]\n if ext.lower() not in valid_images:\n continue\n file = File()\n setattr(file, \"path\", os.path.join(folder.get_path(),f))\n files.append(file)\n Settings.maybe_print(\"image path: {}\".format(os.path.join(folder.get_path(),f)))\n return files", "def build_tiles(cls):\n\n LOGGER.debug(\"Building tiles\")\n\n for tile_id in tiledata.TILE_DATA:\n if not Tile.tile_factory(tile_id):\n LOGGER.error(\"Could not construct tile with ID %d\", tile_id)\n sys.exit(1)", "def webtiles_from_all_geotiffs(self, update_ranges=True, overwrite=True):\n geotiff_paths = self.tiles.get_filenames_from_dir('geotiff')\n self.webtiles_from_geotiffs(geotiff_paths, update_ranges, overwrite)", "def list_images(img_dir) -> Iterable[str]:\n extensions = (\".png\", \".jpg\", \".jpeg\", \".tif\", \".tiff\")\n\n paths = Path(img_dir).glob(\"**/*\")\n paths = filter(lambda p: p.is_file() and p.suffix.lower() in extensions, paths)\n return (str(p) for p in paths)", "def load_groundtruths(folder_path, num_images):\n imgs = []\n for i in range(1, num_images + 1):\n image_name = \"satImage_%.3d\" % i\n image_path = folder_path + image_name + \".png\"\n if os.path.isfile(image_path):\n print('Loading ' + image_path)\n img = mpimg.imread(image_path)\n # See if it is better to use dtype = int\n hot_img = convert_image_to_hot(img)\n imgs.append(hot_img)\n else:\n print('File ' + image_path + ' does not exist')\n #imgs = np.around(imgs) # Uncomment if we want to round values.\n imgs_array = np.asarray(imgs)\n return imgs_array", "def loadimages(root):\n imgs = []\n\n def add_json_files(path, ):\n for imgpath in glob.glob(path + \"/*.png\"):\n if exists(imgpath) and exists(imgpath.replace('png', \"json\")):\n imgs.append((imgpath, imgpath.replace(path, \"\").replace(\"/\", \"\"),\n imgpath.replace('png', \"json\")))\n for imgpath in glob.glob(path + \"/*.jpg\"):\n if exists(imgpath) and exists(imgpath.replace('jpg', \"json\")):\n imgs.append((imgpath, imgpath.replace(path, \"\").replace(\"/\", \"\"),\n imgpath.replace('jpg', \"json\")))\n\n def explore(path):\n if not os.path.isdir(path):\n return\n folders = [os.path.join(path, o) for o in os.listdir(path)\n if os.path.isdir(os.path.join(path, o))]\n if len(folders) > 0:\n for path_entry in folders:\n explore(path_entry)\n else:\n add_json_files(path)\n\n 
explore(root)\n\n return imgs", "def getfiles_from_dir(self,dir):\n assert not os.path.isdir(dir),\"Invalid dir format\"+str(dir)\n print(\"-----Read Dir :\",dir)\n self.files=glob.glob(os.path.join(dir,\"./*.tif\"))", "def get_images(image_folder_root, image_label_list):\n file_dcm=[]\n X_test = []\n y_test = []\n for file_name,label in image_label_list:\n try:\n current_file = pydicom.dcmread(image_folder_root + file_name + '.dcm')\n pixel_array = current_file.pixel_array\n if (pixel_array.shape != (512,512)):\n continue\n file_dcm.append((file_name,label,brain_window(current_file)))\n y_test.append(label)\n X_test.append(pydicom.dcmread(image_folder_root + file_name + '.dcm').pixel_array)\n except ValueError:\n continue\n return X_test,y_test", "def load_images(folder_path):\n images = []\n # first make image paths list\n # cv2 can take in wildcard args if using glob\n image_paths = glob.glob(folder_path + \"/*\")\n for path in image_paths:\n images.append(cv2.imread(path))\n return (images, image_paths)", "def initialize_observation_files(tile_file_list):\n n_tiles = len(tile_file_list)\n if(n_tiles>0):\n for tile_file in tile_file_list:\n target_tile_pack = util.TargetTile(tile_file) \n target_tile_pack.write_results_to_file(tile_file)\n return", "def tile_slides(slides_filepaths, desired_tile_with, desired_overlap, desired_magnification):\n containing_folders = []\n for slide_filepath in slides_filepaths:\n containing_folders.append(tile_slide(slide_filepath, desired_tile_with, desired_overlap, desired_magnification))\n return containing_folders", "def fastset(self):\n for d in dirlist(os.path.join(self.datadir)):\n for f in imlist(d):\n yield ImageDetection(filename=f, category=filebase(d))", "def load_many_images(paths):\r\n \r\n lpop = __g.pop\r\n \r\n for k in __g.keys()[1:]:\r\n lpop(k)\r\n \r\n if type(paths) == str or type(paths) == tuple and len(paths) == 2 and type(paths[0]) == int:\r\n __g[1] = Surface(paths)\r\n elif type(paths) == list:\r\n for p in range(1, len(paths) + 1):\r\n __g[p] = Surface(paths[p-1])", "def getimagelist(folder):\n imagefolder = Path(folder) \n imagelist = imagefolder.glob(\"**/*.png\") \n return list(imagelist)", "def read_images(imagedir, size, ncores=mp.cpu_count()):\n _f = functools.partial(_image_worker, size=size)\n with mp.Pool(ncores) as pool:\n ret = pool.map(_f, get_files(imagedir))\n return {k: v for k,v in ret if v is not None}", "def getImages(self,Project=\"\"):\n #images = [\"image1.jpg\",\"image2.jpg\",\"image3.jpg\"]\n \n os.chdir(self.dataDir)\n images = glob.glob(\"*.png\")\n \n return images", "def get_imgs(paths_list: list) -> list:\n \n imgs_list = [Image.open(project_path + data_path + paths_list[i]) for i in range(len(paths_list))]\n \n return imgs_list", "def get_images(path, ext=\".jpg\"):\n return get_files(path, ext)", "def get_tile_mapping(image_names):\n tile_map = {}\n tile_num = 0\n\n # iterate over all files\n for file_name in image_names:\n with BioReader(file_name) as br:\n \n # iterate over tiles\n for x in range(0,br.X,tile_size):\n x_max = min([br.X,x+tile_size])\n for y in range(0,br.Y, tile_size):\n y_max = min([br.Y,y+tile_size])\n\n # add tile to tile_map\n tile_map[tile_num] = (file_name, (x,x_max), (y,y_max))\n tile_num+=1\n return tile_map", "def loadFromFolder(self):\n stackData = tifffile.imread(self.path + '/*.tif')\n numChannels = stackData.shape[1] # assuming [slices][channels][x][y]\n numSlices = stackData.shape[0] # assuming [slices][channels][x][y]\n self._numChannels = numChannels\n 
self.header.header['numImages'] = numSlices\n print('loadFromFolder() stackData:', stackData.shape)\n for channel in range(numChannels):\n self._stackList[channel] = stackData[:, channel, :, :]\n self._makeMax(channel)", "def get_images(fish):\n fish_dir = TRAIN_DIR+'{}'.format(fish)\n images = [fish+'/'+im for im in os.listdir(fish_dir)]\n return images", "def loadimages(root):\n imgs = []\n\n def add_json_files(path,):\n for imgpath in glob.glob(path+\"/*.png\"):\n if exists(imgpath) and exists(imgpath.replace('png',\"json\")):\n imgs.append((imgpath,imgpath.replace(path,\"\").replace(\"/\",\"\"),\n imgpath.replace('png',\"json\")))\n for imgpath in glob.glob(path+\"/*.jpg\"):\n if exists(imgpath) and exists(imgpath.replace('jpg',\"json\")):\n imgs.append((imgpath,imgpath.replace(path,\"\").replace(\"/\",\"\"),\n imgpath.replace('jpg',\"json\")))\n\n def explore(path):\n if not os.path.isdir(path):\n return\n folders = [os.path.join(path, o) for o in os.listdir(path) \n if os.path.isdir(os.path.join(path,o))]\n if len(folders)>0:\n for path_entry in folders: \n explore(path_entry)\n else:\n add_json_files(path)\n\n explore(root)\n\n return imgs", "def getFeatures(filedir):\r\n lbs = getLabels(filedir)\r\n width, height = getSize(filedir)\r\n features = [os.listdir(filedir + 's' + str(lbs[i])) for i in range(len(lbs))]\r\n for i in range(len(lbs)):\r\n for j in range(len(features[i])):\r\n im = Image.open(filedir + 's' + str(lbs[i]) + '/' + features[i][j]) # type(im): <class 'PIL.PpmImagePlugin.PpmImageFIle'>\r\n im = im.convert('L') # type(im): <class 'PIL.Image.Image'>\r\n data = im.getdata() # type(data): <class 'ImagingCore'>\r\n img = np.reshape(list(data), (height, width))\r\n features[i][j] = img\r\n return features", "def _load_images_and_labels(image_dir):\n\n print('Extracting images from: ', image_dir)\n\n image_paths = _load_image_paths(image_dir)\n images = _extract_images(image_paths)\n num_images = len(image_paths)\n labels = np.ones(num_images, dtype=np.int64)\n\n return images, labels", "def load_labeled_data():\n\n images = []\n labels = []\n\n for i in range(1, 10):\n path = (\"selflabeled\", str(i), \"*.jpg\")\n filenames = glob.glob(\"/\".join(path))\n images_one_type = [cv2.imread(img) for img in filenames]\n labels_one_type = [i] * len(images_one_type)\n images += images_one_type\n labels += labels_one_type\n\n return images, labels", "def test_generator(self, test_path):\n\n img_list = os.scandir(test_path)\n for img_entry in img_list:\n\n img = cv2.imread(img_entry.path, COLOR_TO_OPENCV[self.color_mode])\n if img.shape[-1] == 3:\n orig_shape = img.shape[-2::-1]\n else:\n orig_shape = img.shape[::-1]\n\n\n img = cv2.resize(img, tuple(self.target_size))\n img = img / 255\n if self.color_mode == \"grayscale\":\n img = np.reshape(img, img.shape + (1,))\n img = np.reshape(img, (1,) + img.shape)\n yield img, img_entry, orig_shape", "def open(*args, **kwargs):\n return GDALFileTileSource(*args, **kwargs)", "def list_images(self):\n raise NotImplementedError()", "def icons_from_folder(folder, resolution=None, col=None,\n cmap=None, border_type=None, border_width=2,\n make_square=False, circ_cut=None):\n icons = dict()\n for filename in os.listdir(folder):\n try:\n im = PIL.Image.open(filename)\n icons[filename] = Icon(\n image=im, col=col, resolution=resolution,\n cmap=cmap, border_type=border_type,\n border_width=border_width,\n make_square=make_square, circ_cut=circ_cut)\n except (FileNotFoundError, UnidentifiedImageError, IsADirectoryError,\n PermissionError):\n pass\n 
return icons", "def __init__(self, path, **kwargs): # noqa\n super().__init__(path, **kwargs)\n\n self._largeImagePath = str(self._getLargeImagePath())\n\n try:\n self._initWithTiffTools()\n return\n except Exception as exc:\n config.getConfig('logger').debug('Cannot read with tifftools route; %r', exc)\n\n try:\n alldir = self._scanDirectories()\n except IOOpenTiffError:\n msg = 'File cannot be opened via tiff source.'\n raise TileSourceError(msg)\n except (ValidationTiffError, TiffError) as exc:\n alldir = []\n lastException = exc\n\n # If there are no tiled images, raise an exception.\n if not len(alldir):\n if not os.path.isfile(self._largeImagePath):\n raise TileSourceFileNotFoundError(self._largeImagePath) from None\n msg = \"File %s didn't meet requirements for tile source: %s\" % (\n self._largeImagePath, lastException)\n config.getConfig('logger').debug(msg)\n raise TileSourceError(msg)\n # Sort the known directories by image area (width * height). Given\n # equal area, sort by the level.\n alldir.sort()\n # The highest resolution image is our preferred image\n highest = alldir[-1][-1]\n directories = {}\n # Discard any images that use a different tiling scheme than our\n # preferred image\n for tdir in alldir:\n td = tdir[-1]\n level = tdir[2]\n if (td.tileWidth != highest.tileWidth or\n td.tileHeight != highest.tileHeight):\n if not len(self._associatedImages):\n self._addAssociatedImage(self._largeImagePath, tdir[-2], True, highest)\n continue\n # If a layer's image is not a multiple of the tile size, it should\n # be near a power of two of the highest resolution image.\n if (((td.imageWidth % td.tileWidth) and\n not nearPowerOfTwo(td.imageWidth, highest.imageWidth)) or\n ((td.imageHeight % td.tileHeight) and\n not nearPowerOfTwo(td.imageHeight, highest.imageHeight))):\n continue\n # If a layer is a multiple of the tile size, the number of tiles\n # should be a power of two rounded up from the primary.\n if (not (td.imageWidth % td.tileWidth) and not (td.imageHeight % td.tileHeight)):\n htw = highest.imageWidth // td.tileWidth\n hth = highest.imageHeight // td.tileHeight\n ttw = td.imageWidth // td.tileWidth\n tth = td.imageHeight // td.tileHeight\n while (htw > ttw and htw > 1) or (hth > tth and hth > 1):\n htw = (htw + 1) // 2\n hth = (hth + 1) // 2\n if htw != ttw or hth != tth:\n continue\n directories[level] = td\n if not len(directories) or (len(directories) < 2 and max(directories.keys()) + 1 > 4):\n msg = 'Tiff image must have at least two levels.'\n raise TileSourceError(msg)\n\n sampleformat = highest._tiffInfo.get('sampleformat')\n bitspersample = highest._tiffInfo.get('bitspersample')\n self._dtype = np.dtype('%s%d' % (\n tifftools.constants.SampleFormat[sampleformat or 1].name,\n bitspersample,\n ))\n self._bandCount = highest._tiffInfo.get('samplesperpixel')\n # Sort the directories so that the highest resolution is the last one;\n # if a level is missing, put a None value in its place.\n self._tiffDirectories = [directories.get(key) for key in\n range(max(directories.keys()) + 1)]\n self.tileWidth = highest.tileWidth\n self.tileHeight = highest.tileHeight\n self.levels = len(self._tiffDirectories)\n self.sizeX = highest.imageWidth\n self.sizeY = highest.imageHeight\n self._checkForInefficientDirectories()\n self._checkForVendorSpecificTags()", "def load_set(directName, n = np.inf):\n # Loaded a set of images\n\n files = os.listdir(directName)\n n = min(n, len(files))\n #n = len(files)\n print(\"Loading \" + str(n) + \" images\")\n imgs = [mpimg.imread(directName 
+ files[i]) for i in range(n)]\n\n return imgs", "def _open_images(training_filenames, path):\n imagePaths=[os.path.join(path,f) for f in training_filenames]\n faces=[]\n for i, imagePath in enumerate(imagePaths):\n faceImg=Image.open(imagePath).convert('L')\n faceNp=np.array(faceImg,'uint8')\n faces.append(faceNp)\n return faces", "def loadImages(files, targets):\n images = []\n for file in files:\n targets.append(file)\n images.append(snd.imread(file))\n return images, targets", "def open(self, path):\n\n # abre el tilemap en formato JSON\n data = JSON.open(path)\n\n # número de tiles en 'x' y 'y'\n self.width = data['width']\n self.height = data['height']\n\n # ancho y alto de los tiles\n self.tilewidth = data['tilewidth']\n self.tileheight = data['tileheight']\n\n # calcula las dimensiones del tilemap en pixeles\n self.rect.w = self.width * self.tilewidth\n self.rect.h = self.height * self.tileheight\n\n # extrae los tilesets\n tilesets = self.tilesets\n for tileset_node in data['tilesets']:\n tileset = TiledTileset(tileset_node, path)\n tilesets.append(tileset)\n self.split_tileset(tileset)\n\n # extrae las capas (layers)\n layers = self.layers\n for layer_node in data['layers']:\n layer = TiledLayer(layer_node)\n layers.append(layer)\n self.arrange_tiles(layer)", "def load_sample_images():\n # Try to import imread from scipy. We do this lazily here to prevent\n # this module from depending on PIL.\n try:\n try:\n from scipy.misc import imread\n except ImportError:\n from scipy.misc.pilutil import imread\n except ImportError:\n raise ImportError(\"The Python Imaging Library (PIL) \"\n \"is required to load data from jpeg files\")\n ROOT_Dir = os.getcwd()\n module_path = os.path.join(ROOT_Dir, \"images\")\n with open(os.path.join(module_path, 'README.txt')) as f:\n descr = f.read()\n filenames = [os.path.join(module_path, filename)\n for filename in os.listdir(module_path)\n if filename.endswith(\".jpg\")]\n # Load image data for each image in the source folder.\n images = [imread(filename) for filename in filenames]\n\n return Bunch(images=images,\n filenames=filenames,\n DESCR=descr)" ]
[ "0.67336595", "0.6599813", "0.6416653", "0.63725936", "0.61642754", "0.6014441", "0.5978408", "0.59570676", "0.5940081", "0.5896447", "0.58877945", "0.5837525", "0.5792128", "0.5787893", "0.5751572", "0.57426775", "0.5739392", "0.57090163", "0.57005966", "0.56994253", "0.56881416", "0.5686692", "0.5685227", "0.568453", "0.5681492", "0.568129", "0.56698793", "0.5639855", "0.5638527", "0.56049585", "0.55958617", "0.55908", "0.5545985", "0.5544191", "0.5541913", "0.5541913", "0.5541899", "0.5538325", "0.5534499", "0.5530544", "0.5526134", "0.55074805", "0.5506088", "0.5497357", "0.54957", "0.5482994", "0.5474516", "0.5444184", "0.54435027", "0.54371053", "0.54346293", "0.5431476", "0.5423274", "0.540959", "0.54090935", "0.5405395", "0.54020447", "0.5397869", "0.53973615", "0.5392563", "0.5368993", "0.5368476", "0.53673154", "0.53649175", "0.53645283", "0.53600943", "0.5358369", "0.53575385", "0.5357478", "0.5355481", "0.5350735", "0.5349654", "0.5345017", "0.5344747", "0.53415906", "0.5340025", "0.53398055", "0.5327693", "0.53267545", "0.5324567", "0.53239596", "0.5321485", "0.5308633", "0.53023994", "0.5302065", "0.5301387", "0.52943647", "0.52940494", "0.52900666", "0.528902", "0.5286109", "0.5285092", "0.5284145", "0.5273192", "0.52667576", "0.5260551", "0.5251084", "0.5247432", "0.52433026", "0.5239051" ]
document_score: 0.81790555
document_rank: 0
If a resource has no title, its ID should be returned.
def test_str_no_title(media_resource_factory): resource = media_resource_factory() assert str(resource) == str(resource.id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resourceid(self):", "def resource_id(self) -> Optional[str]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def get_resource_id(resource_instance, resource):\n if resource and \"id\" in resource:\n return resource[\"id\"] and encoding.force_str(resource[\"id\"]) or None\n if resource_instance:\n return (\n hasattr(resource_instance, \"pk\")\n and encoding.force_str(resource_instance.pk)\n or None\n )\n return None", "def get_resource_id(self, obj):\n return obj.id", "def get_id(self, resource):\n try:\n return resource.href.split('/')[-1]\n except AttributeError:\n return resource['href'].split('/')[-1]", "def get_object_id(resource):\n if hasattr(resource, \"object_id\"):\n return int(resource.object_id)\n\n return int(resource.id)", "def getTitle(self, item):\n return item.Title() or item.getId()", "def resourceDocumentId(self, resource: Resource) -> str:", "def id(self):\n return self.raw_resource[\"id\"]", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def _get_ID(self):\n raw_data = imdb.search_for_title(self.title)\n if len(raw_data) > 1:\n raw_data = raw_data[0] # Pulls the first value of the title (the closest match)\n # if there is more than one\n self.ID = raw_data['imdb_id']", "def title_or_id(context):\n title = getattr(context, 'title', '')\n if not title:\n if hasattr(context, '__name__'):\n title = getattr(context, '__name__', '')\n elif hasattr(context, 'getId'):\n title = context.getId()\n return title", "def _get_title_id(cursor, title):\n # run query to find title id for given title\n title_id_query = cursor.execute(dbq.SELECT_TITLE_ID, [title])\n\n if title_id_query:\n return _fetch_value(cursor)\n else:\n return None", "def title_by_id(id_: int) -> Any:\n post = Posts.query.filter_by(id=id_).first()\n if post is None:\n return \"404\"\n return post.title", "def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> 
pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")", "def resource_link_title(self):\n return self.request.POST.get(\"resource_link_title\", self.resource_link_id)", "def id_by_title(self, title):\n logging.debug('id_by_title(%s)', title)\n if not self.list_loaded_:\n self.load_shows()\n\n for show_id in self.shows_data:\n next_show = self.shows_data[show_id]\n logging.debug('id_by_title(%s) = %s', next_show['title'], show_id)\n if next_show['title'] == title:\n logging.debug('Found id_by_title(%s) = %s', title, show_id)\n return show_id\n\n print('Unknown title - {0}'.format(title))\n sys.exit(1)", "def get_title_by_id(id):\n\n # your code", "def _http_get_title_by_id(self, id) -> dict:\n if int(id) == -1:\n # there is no title\n return None\n playl = self._http_playlist()\n return [title for title in playl if int(title['id']) == int(id)][0]", "def get_primary_key(self, request):\n queryset = self.get_queryset()\n url = request.data['url']\n try:\n article = get_object_or_404(queryset, url=url)\n return Response({'primary_key': article.identifier})\n except Http404 as e:\n return Response({'error': str(e)})", "def PolonaGetFirst(title:str):\n URL='https://polona.pl/api/entities/'\n PARAMS={'query':title, 'size':'1', 'public':'1'}\n\n r = requests.get(URL, PARAMS)\n data = r.json()\n e_id = data['hits'][0]['id']\n return e_id", "def safe_title(self):\n try:\n return self.title\n except ObjectDoesNotExist:\n return None", "def get_title(self, region, namespace, id, **filters):\n filters['namespace'] = namespace\n return self.get_resource('data/wow/title/{0}', region, *[id], **filters)", "def test_resource_id(self):\n resource_id = '1234-1234-1234'\n\n # resource id does not exist\n resp = self.app.get(\n f'/v1/resource/{self.test_resource}/id/{resource_id}',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 404)\n\n # create a resource ID\n resp = self.app.post(\n f'/v1/resource/{self.test_resource}/id/{resource_id}',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 201)\n\n # resource id exists\n resp = self.app.get(\n f'/v1/resource/{self.test_resource}/id/{resource_id}',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n\n # cannot create twice\n resp = self.app.post(\n f'/v1/resource/{self.test_resource}/id/{resource_id}',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 409)\n\n # delete resource id\n resp = self.app.delete(\n f'/v1/resource/{self.test_resource}/id/{resource_id}',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n\n # resource id does not exist\n resp = self.app.get(\n f'/v1/resource/{self.test_resource}/id/{resource_id}',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 404)", "def get_by_title(self, title):\n return Field(self.context,\n ResourcePathServiceOperation(\"getByTitle\", [title], self.resource_path))", "def id(self):\n return self.raw_resource.uuid", "def get_resource_id(self, obj):\n return obj.video.id", "def res_title(self):\n return self.get(\"res_title\", default=None, decode=True)", "def get_title(self):\n title = self.title\n if not title and self.parent_id:\n title = self.parent.title\n return title", "def title(self) -> Optional[str]:\n if self._title is not None:\n return self._title\n if self._target_object is not None and isinstance(\n self._target_object, 
pystac.Catalog\n ):\n return self._target_object.title\n return None", "def find_resource_by_name_or_id(self, resource_name, value):\n try:\n entity = getattr(self.client(), resource_name)\n return entity.get(value).id\n except sahara_base.APIException:\n return self.find_resource_by_name(resource_name, value)", "def get_title(cls, obj, **kwargs):\n if isinstance(obj.data, dict):\n titles = filter(None, get_value(obj.data, \"titles.title\", []))\n if titles:\n # Show first title that evaluates to True\n return titles[0]\n return \"No title available\"", "def get_by_natural_key(self, title):\n try:\n return self.get(title=title)\n except ObjectDoesNotExist:\n logging.getLogger(self.__module__).error('%s \"%s\" does not exist',\n self.model.__name__, title)", "def find_resource(self, resource_name, package_title=None):\n metadata = self.get_ckan_metadata()\n results = []\n for id, resource in metadata.items():\n if resource['name'] == resource_name:\n if package_title is None or resource['dataset']['title'] == package_title:\n results.append(resource)\n return results[0] if len(results) == 1 else results", "def get_id(self, item):\n return self.exist_products.get(hash(item['title']))", "def title(self) -> Optional[str]:\n return self.get(\"/Title\")", "def resource_id(self, value, match_option=None):\n return self.attributes(\"resource-id\", value, match_option)", "def get_title(self) -> Optional[str]:\n return self.title", "def source_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"source_resource_id\")", "def source_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"source_resource_id\")", "def id(self) -> Optional[str]:\n return pulumi.get(self, \"id\")", "def id(self) -> Optional[str]:\n return pulumi.get(self, \"id\")", "def id(self) -> Optional[str]:\n return pulumi.get(self, \"id\")", "def id(self) -> Optional[str]:\n return pulumi.get(self, \"id\")", "def id(self) -> Optional[str]:\n return pulumi.get(self, \"id\")", "def id(self) -> Optional[str]:\n return pulumi.get(self, \"id\")", "def acs_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"acs_resource_id\")", "def get_one(self, resource_id):\r\n authorized_project = acl.get_limited_to_project(pecan.request.headers)\r\n resources = list(pecan.request.storage_conn.get_resources(\r\n resource=resource_id, project=authorized_project))\r\n if not resources:\r\n raise EntityNotFound(_('Resource'), resource_id)\r\n return Resource.from_db_and_links(resources[0],\r\n self._resource_links(resource_id))", "def getTitle(self):\n cmdId = self.executeCommand(Command.GET_TITLE)\n return cmdId", "def id(self):\n return self.get('id')", "def get_primary_id(self):", "def get_object(self, pk):\n try:\n return JobTitle.objects.get(Q(id=pk) | Q(uid=pk))\n except JobTitle.DoesNotExist:\n raise Http404", "def get_objectID(self):\n return self.resource.uuid", "def resource_name(self) -> Optional[str]:\n return pulumi.get(self, \"resource_name\")", "def resource_name(self) -> Optional[str]:\n return pulumi.get(self, \"resource_name\")", "def get_resource_slug(self):\n if hasattr(self, '_resource_slug'):\n return self._resource_slug\n return slugify(self.get_resource_name())", "def get_id(self) -> Optional[str]:\n return self.id_", "def _get_id(self):\n return self.id", "def get_snippet(self, title=None):\n for snippet in self.snippets:\n if snippet[\"title\"] == title:\n return snippet\n return None", "def target_resource_id(self) -> Optional[pulumi.Input[str]]:\n return 
pulumi.get(self, \"target_resource_id\")", "def get_title_id(title=_(\"Name the element\"),\n text=_(\"Choose a name for the element\"),\n element_title=None,\n element_id=None):\n d = title_id_dialog(title=title,\n element_title=element_title,\n element_id=element_id,\n text=text)\n d.show_all()\n center_on_mouse(d)\n\n res=d.run()\n if res == gtk.RESPONSE_OK:\n try:\n t=unicode(d.title_entry.get_text())\n i=unicode(d.id_entry.get_text())\n except ValueError:\n t=None\n i=None\n else:\n t=None\n i=None\n\n d.destroy()\n\n return t, i", "def id(self) -> typing.Optional[str]:\n return self._values.get('id')", "def id(self) -> typing.Optional[str]:\n return self._values.get('id')", "def get_title_by_id(id):\n\n sales_data = data_manager.get_table_from_file(\"sales/sales.csv\")\n for line in sales_data:\n if line[ID] == id:\n return line[TITLE]\n return None", "def id(self):\n return self.get_data(\"id\")", "def media_content_id(self):\n if 'current_title' in self._status:\n return self._status['current_title']", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def Title(self, default={}):\n return HEP.TitleObject(self.data.get('title', default))", "def test_get_id(self):\n\n self.metadata.create_or_update(data=self.create)\n\n # First pick up by name\n res_name = self.metadata.get_by_name(\n entity=Dashboard, fqn=self.entity.fullyQualifiedName\n )\n # Then fetch by ID\n res = self.metadata.get_by_id(entity=Dashboard, entity_id=res_name.id)\n\n self.assertEqual(res_name.id, res.id)", "def get_initial_resource(client, api_id):\n response = client.get_resources(\n restApiId=api_id\n )\n return response['items'][0]", "def getID():", "def id(self) -> Optional[int]:\n return self.__id", "def Title(self, default=None):\n return self.data.get('title', default)", "def title(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"title\")", "def title(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"title\")", "def title(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"title\")", "def title(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"title\")" ]
[ "0.7058361", "0.68127316", "0.6649041", "0.6649041", "0.6649041", "0.6649041", "0.6649041", "0.6649041", "0.6649041", "0.6649041", "0.6649041", "0.66319466", "0.662835", "0.65403503", "0.6520874", "0.6478558", "0.643827", "0.6429088", "0.6386452", "0.6386452", "0.6386452", "0.63724643", "0.63583994", "0.6335865", "0.62824434", "0.62696826", "0.62696826", "0.62696826", "0.62696826", "0.62696826", "0.62696826", "0.62696826", "0.62696826", "0.62696826", "0.62696826", "0.62317127", "0.6169583", "0.61199826", "0.611653", "0.60753804", "0.604686", "0.6041517", "0.60407096", "0.60022855", "0.59890157", "0.59773606", "0.59757215", "0.59553903", "0.59303194", "0.58983684", "0.5895561", "0.5892877", "0.5888553", "0.58294624", "0.5826926", "0.5809076", "0.58003134", "0.57624626", "0.5750972", "0.5750972", "0.57445306", "0.57445306", "0.57445306", "0.57445306", "0.57445306", "0.57445306", "0.5719861", "0.56942207", "0.5659612", "0.56320375", "0.5624634", "0.56027967", "0.560019", "0.5596213", "0.5596213", "0.5592645", "0.5580841", "0.5555799", "0.55401564", "0.55329686", "0.55303186", "0.5527588", "0.5527588", "0.55176884", "0.5503331", "0.5502509", "0.5500384", "0.5500384", "0.5500384", "0.5500384", "0.548921", "0.54887974", "0.5486478", "0.54841083", "0.54839176", "0.5481054", "0.5480948", "0.5480948", "0.5480948", "0.5480948" ]
document_score: 0.5996474
document_rank: 44
If a resource has a title, it should be included in the string representation.
def test_str_with_title(media_resource_factory): resource = media_resource_factory(title="Test Resource") assert str(resource) == f"{resource.id} ({resource.title})"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resource_link_title(self):\n return self.request.POST.get(\"resource_link_title\", self.resource_link_id)", "def get_resource_details (self):\n return (f\"[Title:\\\"{self.get_title()}\\\"] [Author:{self.get_author()}] [Publisher:{self.get_publisher()}] [Year:{self.get_year()}]\")", "def res_title(self):\n return self.get(\"res_title\", default=None, decode=True)", "def short_title(self):\n if hasattr(self, \"title\"):\n return self.title\n else:\n return \"\"", "def __str__(self):\n return \"{title}\".format(title=self.title)", "def get_title(self):\n if not hasattr(self, '_title'):\n self._title = 'NO TITLE'\n if self._title:\n title = _(self._title)\n title = title.replace('&', '&amp;') \n title = title.replace('\"', '&quot;')\n return title\n else:\n return u''", "def get_title_repr(self) -> str:\n try:\n return Title[self.title].value\n except (KeyError, ValueError):\n pass", "def __str__(self):\n return str(self.title)", "def inclusive_title(self):\n return self.title + (\" %s\" % (self.episode_to_string(self.latest_season, self.latest_episode),) if self.is_series() else \"\")", "def title_string(self):\n return ' '.join(self.title).replace(' - ', '')", "def __str__(self):\n \n return self.title", "def format_title(self, data):\n return data", "def safe_title(self):\n try:\n return self.title\n except ObjectDoesNotExist:\n return None", "def title(self, obj):\n return str(obj)", "def test_str_no_title(media_resource_factory):\n resource = media_resource_factory()\n\n assert str(resource) == str(resource.id)", "def get_title(self) -> str:\n pass", "def get_title():", "def name_with_title(self):\n return \"%s %s\" % (self.title, self.name)", "def complete_alt_title(self, obj):\n return str(obj)", "def __str__(self):\n\t\treturn self.title", "def __str__(self):\n\t\treturn self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def _make_title(self):\n ret = self.properties['reason'].capitalize()\n ret += ' has been reported near ' + self.properties['address'].split(',')[0]\n time = datetime.strptime(self.properties['when'], '%Y-%m-%dT%H:%M:%S')\n times = [time.strftime(i).lstrip('0') for i in ('%m', '%d', '%I:%M%p')]\n ret += ' on {}/{} at {}'.format(times[0], times[1], times[2])\n return ret", "def __str__(self):\n return self.Title", "def generate_title(self, title=None):\n if title is None:\n title = self.header.get('title', self.title)\n\n title = self.generate(title)\n title = title.replace('<p>', '').replace('</p>', '')\n # no trailing newlines\n title = re.sub(r'\\n+', ' ', title).rstrip()\n return title", "def title(self):\n return self['title']", "def GetTitle(self):\n return str(self.title)", "def Title(self, default={}):\n return HEP.TitleObject(self.data.get('title', default))", "def 
__str__(self):\n\n return self.title", "def get_title(cls, obj, **kwargs):\n if isinstance(obj.data, dict):\n titles = filter(None, get_value(obj.data, \"titles.title\", []))\n if titles:\n # Show first title that evaluates to True\n return titles[0]\n return \"No title available\"", "def title(self, title: \"str\"):\n self._attrs[\"title\"] = title", "def title(self, title: \"str\"):\n self._attrs[\"title\"] = title", "def title(self, title: \"str\"):\n self._attrs[\"title\"] = title", "def title(self, title: \"str\"):\n self._attrs[\"title\"] = title", "def title(self) -> str:\n pass", "def title(self) -> str:\n raise NotImplementedError", "def get_title(self) -> Optional[str]:\n return self.title", "def title(self) -> Optional[str]:\n if self._title is not None:\n return self._title\n if self._target_object is not None and isinstance(\n self._target_object, pystac.Catalog\n ):\n return self._target_object.title\n return None", "def title(self):\n return self.get(\"title\")", "def set_title(self, title):\n if check_data_exist(title) is True:\n self.title = title.text", "def title(self, new_title):\n\n # Check a type of 'new_title' parametr\n if not isinstance(new_title, basestring):\n raise TypeError('string type expected')\n self._title = new_title", "def test_Entry_title(self):\n test_entry = self.create_Entry()\n self.assertTrue(test_entry.title == str(test_entry))", "def title(self) -> str:\n return self._search_in_properties(ATTR_TITLE)", "def get_title(self):\n return self.title", "def get_title(self):\n return self.title", "def get_title(self):\n return self.title", "def title(self) -> \"str\":\n return self._attrs.get(\"title\")", "def title(self) -> \"str\":\n return self._attrs.get(\"title\")", "def title(self) -> \"str\":\n return self._attrs.get(\"title\")", "def title(self) -> \"str\":\n return self._attrs.get(\"title\")", "def get_title(self):\n title = self.title\n if not title and self.parent_id:\n title = self.parent.title\n return title", "def title(self):\n return self.values.get('title')", "def title(self):\n strng = \"\"\n if self.type:\n strng = self.type\n if self.server:\n if self.status:\n strng = \"%s\" % (strng)\n if not strng:\n strng = \"Error\"\n strng = \"%s on %s\" % (strng, self.server)\n elif self.status:\n strng = self.status\n if self.server:\n strng = \"%s on server %s\" % (strng, self.server)\n elif self.raw:\n strng = self.raw\n else:\n strng = self.error_timestamp.isoformat()\n if self.uid:\n strng = \"%s\" % (strng)\n return strng", "def title(self):\n\n return self._title", "def title(self):\n return self.__values['title']", "def test_title(self):\n key = api.portal.get_registry_record(\n 'plone.site_title'\n )\n self.assertEqual(u'Briefy CMS', key)", "def Title(self, default=None):\n return self.data.get('title', default)", "def title(self):\n return self.properties.get('Title', None)", "def html_title(self, title=None):\r\n if title is None:\r\n return \"<title>PyBossa</title>\"\r\n else:\r\n return \"<title>PyBossa &middot; %s</title>\" % title", "def title(self, title):\n\n self._title = title", "def title(self, title):\n\n self._title = title", "def title(self, title):\n\n self._title = title", "def title(self, title):\n\n self._title = title", "def title(self, title):\n\n self._title = title", "def title(self, title):\n\n self._title = title", "def title(self, title):\n\n self._title = title", "def title(self, title):\n\n self._title = title", "def title(self, title):\n\n self._title = title", "def title(self, title):\n\n self._title = 
title", "def title(self, title):\n\n self._title = title", "def title(self, title):\n\n self._title = title", "def title(self, title):\n\n self._title = title", "def title(self, title):\n\n self._title = title", "def get_title(self):\n return \"{id}@{hn}\".format(id=self.model.identity, hn=self.model.hostname)", "def title(self):\n return ' '.join(self._title)", "def _get_title_tag(self, item):\n tag = '<{heading}><a href=\"{href}\">{title}</a></{heading}>'\n if self._field_is_visible(\"title\"):\n tile_conf = self.get_tile_configuration()\n title_conf = tile_conf.get(\"title\", None)\n if title_conf:\n heading = title_conf.get(\"htmltag\", \"h2\")\n href = item.absolute_url()\n title = item.Title()\n return tag.format(heading=heading, href=href, title=title)", "def __str__(self):\n len_title=75\n if len(self.description)>len_title:\n titlestring=self.description[:len_title] + '...'\n else:\n titlestring=self.description\n return titlestring", "def title(self, value: str):\n self._title = value", "def getTitle(self, item):\n return item.Title() or item.getId()", "def get_title(self):\n\n return self.title", "def title(self):\n return self.get(self._names[\"title\"])" ]
[ "0.68804574", "0.6865952", "0.6629121", "0.6599802", "0.6590698", "0.6551943", "0.6400065", "0.6386993", "0.6346398", "0.632626", "0.63220584", "0.63117653", "0.6293808", "0.62767816", "0.6276447", "0.62574726", "0.6230404", "0.61885935", "0.6185459", "0.61847913", "0.61847913", "0.6176147", "0.6176147", "0.6176147", "0.6176147", "0.6176147", "0.6176147", "0.6176147", "0.6176147", "0.6176147", "0.6176147", "0.6176147", "0.6176147", "0.6176147", "0.6176147", "0.6176147", "0.6176147", "0.6176147", "0.6176147", "0.6176147", "0.6176147", "0.6169051", "0.611775", "0.6099699", "0.60953367", "0.6085501", "0.60701483", "0.6053991", "0.6052323", "0.60350305", "0.60350305", "0.60350305", "0.60350305", "0.6027491", "0.60236675", "0.60229295", "0.6017456", "0.60131425", "0.60127443", "0.5996869", "0.5996146", "0.5993514", "0.59928006", "0.59928006", "0.59928006", "0.5983924", "0.5983924", "0.5983924", "0.5983924", "0.59828794", "0.5978829", "0.5968778", "0.5958234", "0.5954821", "0.59458303", "0.5945811", "0.5940779", "0.59374344", "0.59333247", "0.59333247", "0.59333247", "0.59333247", "0.59333247", "0.59333247", "0.59333247", "0.59333247", "0.59333247", "0.59333247", "0.59333247", "0.59333247", "0.59333247", "0.59333247", "0.5926526", "0.5921994", "0.5914297", "0.5911228", "0.58962363", "0.5887206", "0.5884079", "0.5881229" ]
document_score: 0.7142992
document_rank: 0
Media resources should be ordered by creation time, ascending.
def test_ordering(media_resource_factory): m1 = media_resource_factory() m2 = media_resource_factory() assert list(models.MediaResource.objects.all()) == [m1, m2]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_sorted_img_list():\n dirPath=settings.BASE_DIR\n imgdir=\"/pttWeb/static/topicmodel\"\n fileID=glob.glob(dirPath+imgdir+\"/*.png\")\n fileID=[i.replace('/home/stream/Documents/minimum_django/pttWeb/static/','') for i in fileID]\n fileID=[Week_Image(i) for i in fileID]\n fileID.sort(key=lambda x: x.date, reverse=True)\n #translate . to / since javascript parsing date has some issue!\n fileID=[(i.filename,date_trans_z(i.date.strftime(\"%Y.%m.%d\"))) for i in fileID]\n return fileID", "def filter(self):\n for f in FileHelper.ALL_PATHS:\n media_obj = MediaObject(FileHelper.get_url(f), FileHelper.get_title(f), FileHelper.get_media_type(f), FileHelper.get_icon(f), FileHelper.get_duration(f), FileHelper.get_ctype(f))\n _id = media_obj.uuid\n if media_obj.media_type == \"image\":\n DB.IMAGES[_id] = media_obj\n elif media_obj.media_type == \"audio\":\n DB.MUSIC[_id] = media_obj\n elif media_obj.media_type == \"video\":\n DB.VIDEOS[_id] = media_obj\n else:\n print \"File '%s' doesn't play nice.\" % (f)", "def __get_sorted_file_list(self):\n d = self.__view.CurrentImgDir\n list = os.listdir(d)\n if self.__view.SortType == constant.THUMB_SORT_FILENAME:\n # Sort by Name\n list.sort()\n if self.__view.SortType == 2:\n # Sort by Size\n list.sort(lambda a, b: int(os.stat(os.path.join(d,a))[stat.ST_SIZE] - os.stat(os.path.join(d,b))[stat.ST_SIZE])) \n return list", "def test_get_resources_ordered(db_session):\n query_params = {\n \"sort\": \"-album_id,title\"\n }\n parser = ModelQueryParamParser(query_params)\n album_resource = AlbumResource(session=db_session)\n result = album_resource.get_collection(\n filters=parser.parse_filters(album_resource.model),\n sorts=parser.parse_sorts()\n )\n assert len(result) == 347\n assert result[0][\"album_id\"] == 347", "def get_list(self ):\n headers = { 'Authorization' : self.client.authorization_header }\n response = requests.get(\n self.client.url + '/media', \n headers = headers\n )\n\n return json.loads(response.text)", "def ordered_images(self):\n return self.images.order_by('story_images__id')", "def listall(self):\n list_query = \"\"\"SELECT * FROM %s\"\"\" % MediaCollection.COLLECTIONS_TABLE\n self.cursor.execute(list_query)\n return [Media.fromtuple(media) for media in self.cursor.fetchall()]", "def get_guid_objects(queue, media_base, absolute=False):\n index = 1\n guid_map = {}\n object_map = {}\n if absolute:\n image_base = \"{0}\".format(media_base) + \"/media/{0}/{1}\"\n else:\n image_base = \"./media/{0}/{1}\"\n logging.info(\"Collecting GUID object information\")\n for file_object in os.scandir(media_base + \"/metadata/guid\"):\n if not file_object.is_file():\n continue\n with open(file_object.path, \"r\") as toml_file:\n metadata = toml.load(toml_file)\n base_name = os.path.basename(metadata[\"image\"])\n image_name = image_base.format(metadata[\"type\"], base_name)\n image_extension = image_name.split(\".\").pop(-1)\n\n picture_name = metadata[\"title\"]\n if picture_name == \"\":\n picture_name = \"Untitled\"\n\n object_id = \"@M{0}@\".format(index)\n object_entry = [\n \"0 {0} OBJE\".format(object_id),\n \"1 FILE {0}\".format(image_name),\n \"1 FORM {0}\".format(image_extension),\n \"1 TITL {0}\".format(picture_name),\n ]\n\n if metadata[\"type\"] == \"portrait\":\n object_entry.append(\"1 TYPE Photo\")\n else:\n object_entry.append(\"1 TYPE {0}\".format(metadata[\"type\"].title()))\n\n if \"url\" in metadata and metadata[\"url\"] != \"\":\n object_entry.append(\"1 NOTE {0}\".format(metadata[\"url\"]))\n\n if \"facts\" in metadata:\n 
facts = metadata[\"facts\"]\n for key in [\"description\", \"transcription\"]:\n if key in facts and facts[key] != \"\":\n note = build_note(facts[key], 1)\n if len(note) > 0:\n for item in note:\n object_entry.append(item)\n\n object_map.update({object_id: object_entry})\n guid_map.update({metadata[\"guid\"]: object_id})\n index = index + 1\n if index > 99999:\n logging.error(\"100000 GUID objects not supported, APID range starts there\")\n sys.exit(1)\n logging.info(\"GUID object collection completed\")\n queue.put((guid_map, object_map))", "def list_media(storage, filter_list):\n results = []\n total = 0\n try:\n for media in storage.listdir('.')[1]:\n if not media.endswith('/') and media != \"\":\n location = storage.url(media).split('?')[0]\n total += 1\n if not filter_list or location in filter_list:\n results += [\n {'location': location,\n 'tags': MediaTag.objects.filter(\n location=location).values_list(\n 'tag', flat=True)\n }]\n except OSError:\n LOGGER.exception(\n \"Unable to list objects in %s.\", storage.__class__.__name__)\n except S3ResponseError:\n LOGGER.exception(\n \"Unable to list objects in %s bucket.\", storage.bucket_name)\n return {'count': total, 'results': results}", "def get_apid_objects(queue, media_base, args, absolute=False):\n work_lock = Lock()\n work_queue = Queue()\n\n readers = os.cpu_count()\n read_lock = Lock()\n read_queue = Queue()\n read_processes = []\n for number in range(readers):\n read_process = Process(\n target=read_apids, args=(read_queue, read_lock, work_queue, work_lock)\n )\n read_process.start()\n read_processes.append(read_process)\n\n logging.info(\"Collecting APID object information\")\n file_list = []\n file_total = 0\n for file_object in os.scandir(media_base + \"/metadata/apid\"):\n if not file_object.is_file():\n continue\n file_list.append({\"fileName\": file_object.path})\n file_total = file_total + 1\n\n read_lock.acquire()\n for item in file_list:\n read_queue.put(item)\n for item in read_processes:\n read_queue.put({\"exit\": True})\n read_lock.release()\n\n index = 100000\n apid_image_map = {}\n apid_screenshot_map = {}\n apid_full_map = {}\n object_map = {}\n image_cache = {}\n item_count = 0\n if absolute:\n image_base = \"{0}\".format(media_base) + \"/media/{0}\"\n else:\n image_base = \"./media/{0}\"\n while True:\n work_lock.acquire()\n if not work_queue.empty():\n metadata = work_queue.get()\n work_lock.release()\n else:\n work_lock.release()\n time.sleep(0.01)\n continue\n\n item_count = item_count + 1\n apid_full_map.update({metadata[\"apid\"]: metadata})\n if \"image\" in metadata:\n if metadata[\"image\"] not in image_cache:\n base_name = metadata[\"image\"].split(\"/media/\").pop(1)\n image_name = image_base.format(base_name)\n image_extension = image_name.split(\".\").pop(-1)\n\n object_id = \"@M{0}@\".format(index)\n object_entry = [\n \"0 {0} OBJE\".format(object_id),\n \"1 FILE {0}\".format(image_name),\n \"1 FORM {0}\".format(image_extension),\n \"1 TYPE document\",\n ]\n\n object_map.update({object_id: object_entry})\n image_cache.update({metadata[\"image\"]: object_id})\n index = index + 1\n else:\n object_id = image_cache[metadata[\"image\"]]\n apid_image_map.update({metadata[\"apid\"]: object_id})\n if \"screenshot\" in metadata:\n base_name = os.path.basename(metadata[\"screenshot\"])\n image_name = image_base.format(\"apid\") + \"/\" + base_name\n image_extension = image_name.split(\".\").pop(-1)\n\n if \"title\" in metadata and metadata[\"title\"] != \"\":\n title = metadata[\"title\"]\n else:\n title 
= \"Ancestry.com Source Record, {0}\".format(metadata[\"apid\"])\n\n object_id = \"@M{0}@\".format(index)\n object_entry = [\n \"0 {0} OBJE\".format(object_id),\n \"1 FILE {0}\".format(image_name),\n \"1 FORM {0}\".format(image_extension),\n \"1 TITL {0}\".format(title),\n \"1 REFN {0}\".format(metadata[\"apid\"]),\n ]\n\n if \"url\" in metadata and metadata[\"url\"] != \"\":\n object_entry.append(\"1 NOTE {0}\".format(metadata[\"url\"]))\n\n object_map.update({object_id: object_entry})\n index = index + 1\n apid_screenshot_map.update({metadata[\"apid\"]: object_id})\n\n if item_count == file_total:\n break\n\n for read_process in read_processes:\n read_process.join()\n queue.put((apid_image_map, apid_screenshot_map, apid_full_map, object_map))\n logging.info(\"APID object collection completed\")", "def recent_media(self):\r\n return media.RecentMedia(self)", "def recent_media(self):\r\n return media.RecentMedia(self)", "def recent_media(self):\r\n return media.RecentMedia(self)", "def recent_media(self):\r\n return media.RecentMedia(self)", "def location_medias_top_v1(\n self, location_pk: int, amount: int = 21\n ) -> List[Media]:\n return self.location_medias_v1(location_pk, amount, tab_key=\"ranked\")", "def get_meta_of_files(session=konfuzio_session()) -> List[dict]:\n url = get_documents_meta_url()\n result = []\n\n while True:\n r = retry_get(session, url)\n data = r.json()\n if isinstance(data, dict) and 'results' in data.keys():\n result += data['results']\n if 'next' in data.keys() and data['next']:\n url = data['next']\n else:\n break\n else:\n result = data\n break\n\n sorted_documents = sorted(result, key=itemgetter('id'))\n return sorted_documents", "def location_medias_recent_v1(\n self, location_pk: int, amount: int = 63\n ) -> List[Media]:\n return self.location_medias_v1(location_pk, amount, tab_key=\"recent\")", "def recent_media(self):\n return media.RecentMedia(self)", "def ordered(cls, objs):\n objs = list(objs)\n try:\n objs.sort(key=lambda o: o.latest_message.created, reverse=True)\n except:\n pass\n return objs", "def __count_media_files(self, CurrentDir):\n self.__counter_lock.acquire()\n self.media_counter = [0, 0]\n self.__counter_lock.release()\n if os.path.exists(CurrentDir):\n for i in os.listdir(CurrentDir):\n MediaType = self.thumb_filter(CurrentDir,i)\n if MediaType == TYPE_PHOTO:\n self.__counter_lock.acquire()\n self.media_counter[0] += 1\n self.__counter_lock.release()\n elif MediaType == TYPE_VIDEO:\n self.__counter_lock.acquire()\n self.media_counter[1] += 1 \n self.__counter_lock.release()", "def getMediaFiles(path):\n fileList = getMediaFileList(path)\n # dirList = getDirectoryList(path)\n\n # results = map(getMediaFiles, dirList)\n\n # for result in results:\n # fileList = fileList + result\n\n return fileList", "def get_real_media(self, provider_name):\n return [Media(f, provider_name) for f in self.videos]", "def test_basic_functionality(self):\n self.assertEqual(mpmodels.MediaItem.objects.count(), 0)\n video = make_video(media_id='1234', title='test title')\n set_resources_and_sync([video])\n self.assertEqual(mpmodels.MediaItem.objects.count(), 1)\n item = mpmodels.MediaItem.objects.get(jwp__key=video.key)\n self.assertEqual(item.title, 'test title')", "def get_json_media(self, provider_name):\n return [JsonMedia(f, provider_name) for f in self.datafiles[provider_name]]", "def get_queryset(self):\n samples = AudioSample.objects.distinct()\n if samples:\n return samples.filter(\n pub_date__lte=timezone.now()\n ).order_by('-pub_date')\n else:\n 
return []", "def shuffled_thumbnails(self):\n while True:\n video_id = random.choice(self.video_ids)\n metadata = self._id_to_meta[video_id]\n thumbs = [th for th in self.video_thumbnails(video_id)]\n if thumbs:\n yield random.choice(thumbs) + (metadata,)", "def location_medias_recent_a1(\n self, location_pk: int, amount: int = 24, sleep: float = 0.5\n ) -> List[Media]:\n return self.location_medias_a1(\n location_pk, amount, sleep=sleep, tab_key=\"edge_location_to_media\"\n )", "def _release_last_resources(self):\n last_resources, self.http_resources = self.http_resources[:], []\n return last_resources", "def test_get(self):\n fields = {\n 'title': 'Test media title',\n 'description': 'Test media description',\n 'local_media_file': 'test_64K_short.mp3',\n }\n\n media_filename = \"%s/%s\" % (settings.get('base', 'path.local.media'), fields['local_media_file'])\n m = Media.create(\n client=self.client,\n media_filename=media_filename,\n title=fields['title'],\n description=fields['description'],\n )\n\n m = Media.get(client=self.client, uuid=m.uuid)\n assert m.title == fields['title']\n assert m.description == fields['description']", "def mediaGenerator(request):\n folder = 'content/' + request\n mediaPaths = glob(folder + '/*')\n return random.choice(mediaPaths)", "def test_videos_default_ordering(mocker, logged_in_apiclient):\n mocker.patch(\"ui.serializers.get_moira_client\")\n mocker.patch(\"ui.utils.get_moira_client\")\n VideoSetPagination.page_size = 5\n client, user = logged_in_apiclient\n collection = CollectionFactory(owner=user)\n VideoFactory.create_batch(10, collection=collection)\n url = reverse(\"models-api:video-list\")\n p1_response = client.get(\"{}?page=1\".format(url))\n assert len(p1_response.data[\"results\"]) == 5\n for i in range(4):\n current_video_date = p1_response.data[\"results\"][i][\"created_at\"]\n next_video_date = p1_response.data[\"results\"][i + 1][\"created_at\"]\n assert current_video_date >= next_video_date\n\n p2_response = client.get(\"{}?page=2\".format(url))\n last_entry_data = p1_response.data[\"results\"][-1][\"created_at\"]\n first_entry_data = p2_response.data[\"results\"][0][\"created_at\"]\n assert last_entry_data >= first_entry_data\n for i in range(4):\n current_video_date = p2_response.data[\"results\"][i][\"created_at\"]\n next_video_date = p2_response.data[\"results\"][i + 1][\"created_at\"]\n assert current_video_date >= next_video_date", "def _sort_by_duration(self) -> None:\n total_samples = len(self.paths)\n if total_samples == 0:\n return\n samples = zip(self.paths, self.durations, self.transcriptions)\n sorted_samples = sorted(samples, key=lambda sample: sample[1])\n self.paths, self.durations, self.transcriptions = [\n list(c) for c in zip(*sorted_samples)\n ]\n assert (\n total_samples\n == len(self.paths)\n == len(self.durations)\n == len(self.transcriptions)\n ), \"_sort_by_duration len mis-match\"", "def test_list_namespaced_image_stream(self):\n pass", "def sort_album(self):\n self.sort('album')", "def last_videos_recorded(self) -> list:\n return sorted(glob.glob(VIDEOS_DIR), key=os.path.getmtime)[-20:]", "def location_medias_recent(\n self, location_pk: int, amount: int = 63, sleep: float = 0.5\n ) -> List[Media]:\n try:\n return self.location_medias_recent_a1(location_pk, amount, sleep)\n except Exception:\n # Users do not understand the output of such information and create bug reports\n # such this - https://github.com/adw0rd/instagrapi/issues/364\n # if not isinstance(e, ClientError):\n # self.logger.exception(e)\n return 
self.location_medias_recent_v1(location_pk, amount)", "def recent_media(self, tag, max_tag_id = None, min_tag_id = None, count = 10):\n\n url = \"https://api.instagram.com/v1/tags/{0}/media/recent?access_token={1}\".format(tag, self.access_token)\n\n if max_tag_id:\n url += \"&max_tag_id=\" + str(max_tag_id)\n if min_tag_id:\n url += \"&min_tag_id=\" + str(min_tag_id)\n\n request = requests.get(url)\n return request.json()", "def location_medias_top_a1(\n self, location_pk: int, amount: int = 9, sleep: float = 0.5\n ) -> List[Media]:\n return self.location_medias_a1(\n location_pk, amount, sleep=sleep, tab_key=\"edge_location_to_top_posts\"\n )", "def sort_time(self):\n self.entries.sort(key=lambda x: x.date_stamp_utc)", "def create_resources(self) -> List[ResourceDescription]:\r\n return self.resources", "def top_mediafiles(user_id, fields, sort_by, sort_desc=True, limit=10, randomize=False):\n query = MediaFiles.query \\\n .join(Locations, MediaFiles.location_id == Locations.id) \\\n .join(Users, MediaFiles.user_id == Users.id) \\\n .filter(MediaFiles.user_id == int(user_id))\n if randomize:\n query = query.order_by(func.random())\n else:\n query = query.order_by(sort_by.desc() if sort_desc else sort_by.asc())\n query = query.limit(limit).add_columns(*fields)\n logging.debug('Query executed: %s' % query)\n return query", "def test_fetch_or_create_disk_media_item_fetches_extant_items(db):\n data = datadir.join('1200x6566.png').read(mode='rb')\n item1 = media.fetch_or_create_media_item(data, file_type='png')\n db.flush()\n item2 = media.fetch_or_create_media_item(data, file_type='png')\n assert item1.mediaid == item2.mediaid", "def video_thumbnails(self, video_id):\n thumbs = glob.glob(os.path.join(self._id_to_path[video_id], 'thumbnail_*.*'))\n pairs = []\n id_index = self.video_ids.index(video_id)\n for thumb_path in thumbs:\n timestamp = int(thumb_path.split('_')[-1].split('.')[0])\n pairs.append((id_index, timestamp))\n return sorted(pairs, key=lambda x: x[1])", "def test_list_media_collection(self):\n self.login_editor()\n\n # Create a media collection object and test that is present on list\n MediaCollection.objects.create(name='Coleção 1',\n description='Coleção de teste 1',\n created_by_id=1, cooperative_center_code='BR1.1')\n\n MediaCollection.objects.create(name='Coleção 2',\n description='Coleção de teste 2',\n created_by_id=2, cooperative_center_code='BR1.1')\n\n MediaCollection.objects.create(name='Coleção 3',\n description='Coleção de teste 3',\n created_by_id=3, cooperative_center_code='PY3.8')\n\n\n response = self.client.get('/multimedia/collections')\n # check if only one collection is returned (restrict by user)\n self.assertContains(response, \"Coleção 1\")\n self.assertEquals(response.context['object_list'].count(), 3)\n\n # check if return only colections from cooperative center BR1.1\n response = self.client.get('/multimedia/collections/?filter_created_by_cc=BR1.1')\n self.assertEquals(response.context['object_list'].count(), 2)", "def self_media(self):\n\n url = \"https://api.instagram.com/v1/users/self/media/recent/?access_token={0}\".format(self.access_token)\n request = requests.get(url)\n return request.json()", "def get_ordered_resources(self):\n \n return self.resources.visible().order_by('members__ordering')", "def list_media(self,\n series_id: str,\n sort: Optional[SortOption] = None,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n locale: Optional[Any] = None) -> list:\n params: Dict[str, Any] = {\n \"series_id\": series_id,\n }\n\n if 
sort:\n params[\"sort\"] = sort.value\n if limit:\n params[\"limit\"] = limit\n if offset:\n params[\"offset\"] = offset\n if locale:\n params[\"locale\"] = locale\n\n return self._api._api_call(\"list_media\", params)", "def _queue_photos(self):\n global filtering_queue\n global number_of_pictures\n\n number_of_pictures = 0\n\n for root, dirs, files in walk(curdir):\n for file in files:\n if dirs == \"thumb\" or dirs == \"filtered\":\n pass\n else:\n file_path = join(root, file)\n filtering_data = [file_path, curdir]\n filtering_queue.put(filtering_data)\n number_of_pictures += 1\n print(\"Queued:\", file_path)\n\n try:\n mkdir(join(curdir, \"thumb\"))\n except FileExistsError:\n pass\n try:\n mkdir(join(curdir, \"thumb\", \"Alexander\"))\n except FileExistsError:\n pass\n try:\n mkdir(join(curdir, \"thumb\", \"Bjarke\"))\n except FileExistsError:\n pass\n try:\n mkdir(join(curdir, \"thumb\", \"Gabrielle\"))\n except FileExistsError:\n pass\n try:\n mkdir(join(curdir, \"thumb\", \"Monica\"))\n except FileExistsError:\n pass\n try:\n mkdir(join(curdir, \"thumb\", \"Wenche\"))\n except FileExistsError:\n pass\n try:\n mkdir(join(curdir, \"filtered\"))\n except FileExistsError:\n pass", "def get_media():\n\n error_on_unauthorized()\n\n media = Upload.query.order_by(Upload.id)\n total_num = media.count()\n\n if total_num == 0:\n return jsonify(total=0, uploads=[])\n\n try:\n count = int(request.args.get('max', total_num))\n page = int(request.args.get('page', 1))\n\n if count <= 0 or page <= 0:\n raise APIError(422, \"Query parameters out of range\")\n\n begin = (page - 1) * count\n end = min(begin + count, total_num)\n\n return jsonify(total=total_num, uploads=[upload_to_dict(u) for u in media.all()[begin:end]]), 200\n except ValueError:\n raise APIError(422, \"Invalid query parameter\")", "def _sort_records(self):\n self.records.sort(reverse=True, key=lambda record: record.timestamp)", "def resources(self):", "def popular_media(self):\r\n return PopularMedia(self)", "def get_sorted_image_files(directory,priority='png'):\n \n #First get a list of file, start with priority, then all other image file types\n im_types = ['png','jpg','bmp','tif']\n im_types.remove(priority)\n \n file_list = glob(directory+'/*.'+priority)\n if not file_list:\n for im_type in im_types:\n file_list = glob(directory+'/*.'+im_type)\n if file_list:\n break\n\n #Currently assume standard mraw output filename\n sorted_list = sorted(file_list,key=lambda file_name: int(file_name.split('.')[0].split('S00')[-1][3:]))\n #print(file_list)\n #print(sorted_list)\n\n return sorted_list", "def test_adding_media_to_channel(self):\n videos = [\n make_video(title='test title', media_id='1'),\n make_video(title='test title 2', media_id='2'),\n ]\n channels = [make_channel(title='test channel', media_ids=['1'], collection_id='3')]\n set_resources_and_sync(videos, channels)\n c = mpmodels.Channel.objects.filter(sms__id='3').first()\n self.assertIsNotNone(c)\n self.assertEqual(len(c.items.all()), 1)\n channels[0]['custom']['sms_collection_media_ids'] = 'collection_media_ids:1,2:'\n channels[0]['updated'] += 1\n set_resources_and_sync(videos, channels)\n self.assertEqual(len(c.items.all()), 2)\n # also check playlist\n playlist = mpmodels.Playlist.objects.filter(sms__id='3').first()\n self.assertEqual(len(playlist.media_items), 2)", "def extract_media_v1(data):\n user = data[\"user\"]\n location = data.get(\"location\")\n if location:\n location = {\"pk\": int(location.get(\"pk\")), \"name\": location.get(\"name\")}\n video_url = \"\"\n 
if \"video_versions\" in data:\n # Select Best Quality by Resolutiuon\n video_url = sorted(\n data[\"video_versions\"], key=lambda o: o[\"height\"] * o[\"width\"]\n ).pop()[\"url\"]\n product_type = data.get(\"product_type\", \"\")\n if data[\"media_type\"] == 2 and not product_type:\n product_type = \"feed\"\n thumbnail_url = ''\n if 'image_versions2' in data:\n thumbnail_url = sorted(\n data[\"image_versions2\"][\"candidates\"],\n key=lambda o: o[\"height\"] * o[\"width\"],\n ).pop()[\"url\"]\n return {\n \"pk\": int(data[\"pk\"]),\n \"taken_at\": int(data[\"taken_at\"]),\n \"id\": data[\"id\"],\n \"media_type\": data[\"media_type\"],\n \"product_type\": product_type,\n \"code\": data[\"code\"],\n \"thumbnail_url\": thumbnail_url,\n \"location\": location,\n \"user\": extract_user_short(user),\n \"comment_count\": int(data.get(\"comment_count\") or 0),\n \"like_count\": int(data.get(\"like_count\") or 0), # the media just published has no like_count\n \"caption_text\": json_value(data, \"caption\", \"text\", default=\"\"),\n \"usertags\": [\n extract_usertag(usertag)\n for usertag in data.get(\"usertags\", {}).get(\"in\", [])\n ],\n \"video_url\": video_url,\n \"view_count\": int(data.get('view_count') or 0),\n \"video_duration\": data.get('video_duration'),\n \"title\": data.get(\"title\") or None,\n \"resources\": [\n extract_resource_v1(edge)\n for edge in data.get('carousel_media', [])\n ]\n }", "def get_files(self):\n m = []\n for post in self:\n m.append(post.FileName)\n return list(sorted(set(m), reverse=True))", "def import_media(self, path):\n media_vertex = {}\n infile = configparser.ConfigParser()\n infile.read(path, encoding='utf-8')\n # Use the path name for error messages or assignments\n for field in infile.items(\"media\"):\n if (field[0].find(\"photo\") != -1 and\n len(field[0].split(\".\")) == 2):\n # Process a small set of photo credits for all the pandas\n # author = infile.get(\"media\", field[0] + \".author\")\n # if author in self.photo[\"credit\"].keys():\n # self.photo[\"credit\"][author] = self.photo[\"credit\"][author] + 1\n # else:\n # self.photo[\"credit\"][author] = 1\n # Track what the max number of panda photos an object has is\n # test_count = int(field[0].split(\".\")[1])\n # if test_count > self.photo[\"max\"]:\n # self.photo[\"max\"] = test_count\n # Accept the data and continue\n media_vertex[field[0]] = field[1]\n # TODO: track video info for apple counting as well\n else:\n # Accept the data and move along\n media_vertex[field[0]] = field[1]\n self.media.append(media_vertex)\n self.vertices.append(media_vertex)\n self.media_files.append(path)", "def test_photos_sorted(pre_pop_transaction, rover_name, sol, camera):\n from mars_street_view.models import Photo\n data = Photo.get_rov_sol(rover_name, sol)\n photos_by_cam = data.get('photos_by_cam', {})\n photos = photos_by_cam.get(camera, [])\n urls_from_method = [photo.img_src for photo in photos]\n assert urls_from_method == list(sorted(urls_from_method))", "def _partition_existing_medias(self, incoming_medias):\n existing_media_urls = [m.url for m in list(self.gstreamers.values())]\n\n def media_exists(media):\n return media.url in existing_media_urls\n\n existing_media_ids = [m.id for m in incoming_medias if media_exists(m)]\n fresh_media_ids = [m.id for m in incoming_medias if not media_exists(m)]\n\n return existing_media_ids, fresh_media_ids", "def _build_adhoc_medias(self, media_list, media_type):\n adhoc_medias = []\n media_id = 0\n for media in media_list:\n media_name = 'adhoc_media_' + 
media_type + '_' + self.viewport_name + '_' + str(media_id)\n adhoc_media = AdhocMedia()\n adhoc_media.id = media_name\n adhoc_media.url = media['path']\n adhoc_media.geometry.x = media['x_coord'] + self._get_viewport_offset()['x']\n adhoc_media.geometry.y = media['y_coord'] + self._get_viewport_offset()['y']\n adhoc_media.geometry.width = media['width']\n adhoc_media.geometry.height = media['height']\n adhoc_media.media_type = media_type\n # TODO figure out if media['on_finish'] or media['activity_config']['onFinish'] is better\n adhoc_media.on_finish = media['on_finish']\n adhoc_media.extra_args = media.get('activity_config', {}).get('args', '')\n adhoc_medias.append(adhoc_media)\n media_id += 1\n\n logger.info(\"Returning adhocmedias: %s for player: %s\" % (adhoc_medias, media_type))\n return adhoc_medias", "def list_entries():\n _, filenames = default_storage.listdir(\"entries\")\n return list(sorted(re.sub(r\"\\.md$\", \"\", filename)\n for filename in filenames if filename.endswith(\".md\")))", "def list_entries():\n _, filenames = default_storage.listdir(\"entries\")\n return list(sorted(re.sub(r\"\\.md$\", \"\", filename)\n for filename in filenames if filename.endswith(\".md\")))", "def list_entries():\n _, filenames = default_storage.listdir(\"entries\")\n return list(sorted(re.sub(r\"\\.md$\", \"\", filename) for filename in filenames if filename.endswith(\".md\")))", "def list_of_medias(args, sourcedir, recursive):\n files = list_of_files(sourcedir, recursive)\n return [_ for _ in files if is_media_within_dates(_, args.dates)]", "def volume_sort(self):\n self.jobs_sorted = sorted(\n self.jobs,\n key=lambda job: (job['height'], job['width'] * job['height']),\n # key=lambda job: job['width'] * job['height'],\n reverse=True)", "def list_of_medias_ext(args, sourcedir):\n result = list()\n listdir = sorted_listdir(os.listdir(sourcedir))\n if '.nomedia' not in listdir:\n for basename in listdir:\n fullname = os.path.join(sourcedir, basename)\n if os.path.isdir(fullname) and basename != '$RECYCLE.BIN' and contains_media(args, fullname):\n result.append(fullname)\n else:\n if is_media_within_dates(fullname, args.dates):\n result.append(fullname)\n return result", "def get_files_list(dirname, date_order, rdate_order):\n file_list = os.listdir(dirname)\n file_mtimes = dict.fromkeys(file_list)\n for f in file_list:\n if f[0] == '.':\n print \"Skipping file: \", f\n del file_mtimes[f]\n continue\n if date_order or rdate_order:\n file_mtimes[f] = os.stat(dirname + '/' + f).st_mtime\n if date_order or rdate_order:\n return sorted(file_mtimes.keys(), key=file_mtimes.get, reverse=rdate_order)\n else:\n return file_list", "def get_oscars_best_picture_list():\n list_file = 'oscar_best_picture_list.txt'\n f = open(list_file, 'r')\n film_list = []\n\n for line in f:\n words = line.split('-')\n film = {\n 'year': words[0][:-1],\n 'name': words[1][2:-2]\n }\n film_list.append(film)\n f.close()\n # Reverse as we want newest first not last\n film_list.reverse()\n return film_list", "async def create_media(self, community: Community):\n media_tab_url = f\"{self._api_stream_url}{community.id}/{self._api_media_tab}\"\n async with self.web_session.get(media_tab_url, headers=self._headers) as resp:\n if self.check_status(resp.status, media_tab_url):\n data = await resp.json()\n media_objects, photo_media_dicts = iterate_community_media_categories(data)\n\n # This endpoint does NOT give us any information about the photos, therefore we must make\n # a separate api call to retrieve proper photo information 
for the photo media.\n for media in photo_media_dicts:\n media_obj = await self.fetch_media(community.id, media.get(\"id\"))\n if media_obj:\n media_objects.append(media_obj)\n\n self._add_media_to_cache(media_objects)", "def location_medias_top(\n self, location_pk: int, amount: int = 27, sleep: float = 0.5\n ) -> List[Media]:\n try:\n return self.location_medias_top_a1(location_pk, amount, sleep)\n except Exception:\n # Users do not understand the output of such information and create bug reports\n # such this - https://github.com/adw0rd/instagrapi/issues/364\n # if not isinstance(e, ClientError):\n # self.logger.exception(e)\n return self.location_medias_top_v1(location_pk, amount)", "def resortFiles(fileList):\n if fileList is None or not len(fileList):\n print \"SRT:nofiles in the dictionary.\"\n sys.exit()\n\n new_file_list = list()\n for f in fileList:\n new_file_list.append(PFileStat(dir_source, f, os.lstat(dir_source + \"/\" + f)))\n\n new_file_list.sort(key=lambda i: i.st_mtime)\n return new_file_list", "def get_queryset(self):\n queryset = MediaFile.objects.all()\n username = self.request.query_params.get('username', None)\n userqueryset = User.objects.all()\n users = userqueryset.filter(username=username)\n if len(users) and username is not None:\n queryset = queryset.filter(owner=users[0])\n return queryset", "def get_all_mediafiles(user_id, params=None, fields=None):\n fields = fields or [MediaFiles.id, MediaFiles.user_id, MediaFiles.path, MediaFiles.duration,\n MediaFiles.size, MediaFiles.title, MediaFiles.comment, MediaFiles.tags,\n MediaFiles.description, MediaFiles.coords, MediaFiles.location_id,\n MediaFiles.year, MediaFiles.created, MediaFiles.imported,\n MediaFiles.updated, MediaFiles.accessed, MediaFiles.visits]\n query = MediaFiles.query \\\n .outerjoin(Locations, MediaFiles.location_id == Locations.id) \\\n .outerjoin(Users, MediaFiles.user_id == Users.id)\n\n year = params.get('year', 'any')\n if year != 'any' and re.match('[1-2]{1}[0-9]{3}', year) or year == '0':\n query = query.filter(MediaFiles.year == year)\n\n location = params.get('location', 'any')\n if location != 'any':\n query = query.filter(MediaFiles.location_id == location)\n\n if user_id != -1: # user_id = -1 will not apply filtering on ownership (used for statistics)\n public = params.get('ownership_public')\n private = params.get('ownership_private')\n if private or public:\n ownership = []\n if public:\n ownership.append(MediaFiles.user_id == 0)\n if private:\n ownership.append(MediaFiles.user_id == int(user_id))\n query = query.filter(or_(*ownership))\n else: # If none of Public/Private selected, then result must be empty\n query = query.filter(MediaFiles.user_id == -1)\n\n entry = params.get('search', '').strip()\n matching = params.get('tags_matching')\n if entry:\n other_matches = []\n if params.get('search_in_path'):\n other_matches.append(MediaFiles.path.contains(entry))\n if params.get('search_in_title'):\n other_matches.append(MediaFiles.title.contains(entry))\n if params.get('search_in_description'):\n other_matches.append(MediaFiles.description.contains(entry))\n if params.get('search_in_comment'):\n other_matches.append(MediaFiles.comment.contains(entry))\n query = query.filter(or_(*other_matches))\n # Note: tags filtering is applied if no other terms are selected\n if params.get('search_in_tags') and not other_matches:\n tag_matches = []\n for tag in entry.split():\n tag_matches.append(MediaFiles.tags.contains(tag))\n if matching == 'strict':\n query = query.filter(and_(*tag_matches))\n 
else:\n query = query.filter(or_(*tag_matches))\n query = query.add_columns(*fields).order_by(func.random())\n logging.debug('Query executed: %s' % query)\n return query", "def test_list(self):\n self.add_to_queue('not-image.txt')\n self.add_to_queue('riker.gif')\n\n rv = self.get('/queue/', token=self.user_token)\n\n expected = {\"filelist\": [{\"filename\": \"riker.gif\",\n \"url\": \"/queue/riker.gif\"\n }\n ]}\n self.assertJSONOk(rv, **expected)\n return", "def sort_duration(self):\n self.sort('duration')", "def get_yaml_media(self, provider_name):\n return [YamlMedia(f, provider_name) for f in self.datafiles[provider_name]]", "def __init__(self, config, media_resource_id, start_index = 0, start_time = 0):\n self.config = config\n self.media_resource_id = media_resource_id\n self.start_index = start_index\n self.start_time = start_time\n self.end_index = self.start_index", "def sorted_files(self, pattern=None):\n return sorted(self.files(pattern))", "def list_sorted_files(uuid, basepath=None):\n if basepath is None:\n basepath = get_basepath()\n if 's3://' in basepath:\n return s3wrangler.list_objects(basepath + 'ephys/' + uuid + '/derived/kilosort2/')\n else:\n # return glob.glob(os.path.join(basepath, f'ephys/{uuid}/derived/kilosort2/*'))\n return glob.glob(basepath + f'ephys/{uuid}/derived/kilosort2/*')", "def get_all(self, start_at, limit, order=None):\n result = []\n objects = []\n if limit == 0:\n objects = self.items[start_at:]\n else:\n objects = self.items[start_at:(start_at + limit)]\n for item in objects:\n result.append(FileDict(item))\n return result", "def get_resources(self, resource_data=None):\n if not resource_data and self.component:\n resource_data = self.component.get_resource_data()\n\n resources = []\n for resource in self.files:\n resource.update(resource_data)\n\n resource['storage_path'] = self.prefix + '/' + resource['name']\n relative_path = self.relative_path(data=resource)\n resource['relative_path'] = relative_path\n resource['url'] = resource['url'] + '/' + relative_path\n resources.append(resource)\n return resources", "def download_media_from_bandwidth(media_urls):\n downloaded_media_files = []\n for media_url in media_urls:\n media_id = get_media_id(media_url)\n filename = get_media_filename(media_url)\n with open(filename, \"wb\") as f:\n try:\n downloaded_media = messaging_client.get_media(MESSAGING_ACCOUNT_ID, media_id)\n f.write(downloaded_media.body)\n except Exception as e:\n print(e)\n downloaded_media_files.append(filename)\n return downloaded_media_files", "def get_movie_thumbnails(time_period, page, filters):\n movies = Movie.objects.filter(**filters) \\\n .order_by('-item__popularity__' + time_period) \\\n .values('title', 'url', 'synopsis', 'image', 'theater_date') \\\n .distinct()\n paginator = Paginator(movies, 12)\n\n try:\n next_page = paginator.page(page).next_page_number()\n paginator.page(next_page)\n except (EmptyPage, InvalidPage):\n next_page = ''\n\n response = [{ \n 'title': escape(movie['title']),\n 'url': reverse('movie-profile', args=[movie['url']]),\n 'synopsis': escape(movie['synopsis'][:140]),\n 'image_url': get_thumbnail(movie['image'], 'x285').url,\n 'next': next_page \n } for movie in paginator.page(page)] \n\n return simplejson.dumps(response)", "def test_fetch_or_create_disk_media_item_with_attributes(db):\n data = datadir.join('1200x6566.png').read(mode='rb')\n item = media.fetch_or_create_media_item(data, file_type='png', attributes={'spam': 'eggs'})\n assert item.attributes == {'spam': 'eggs'}", "def 
test_sorting_album_year_time_added(self):\n self.add_mp3(artist='Artist', title='Title 1',\n album='Album 1', year=2017, filename='song1.mp3')\n self.add_mp3(artist='Artist', title='Title 2',\n album='Album 2', year=2017, filename='song2.mp3')\n self.run_add()\n al2 = self.age_album('Artist', 'Album 2', 10)\n self.assertEqual(Album.objects.count(), 2)\n\n albums = [\n al2,\n Album.objects.get(name='Album 1'),\n ]\n artist = Artist.objects.get(name='Artist')\n\n response = self.client.get(reverse('exordium:artist', args=(artist.normname,)), {'album-sort': 'year'})\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.context['albums'].data), 2)\n self.assertQuerysetEqual(response.context['albums'].data, [repr(al) for al in albums])\n self.assertContains(response, '\"?album-sort=-year\"')\n\n # test reverse sort\n response = self.client.get(reverse('exordium:artist', args=(artist.normname,)), {'album-sort': '-year'})\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.context['albums'].data), 2)\n self.assertQuerysetEqual(response.context['albums'].data, [repr(al) for al in reversed(albums)])\n self.assertContains(response, '\"?album-sort=year\"')", "def getPictures (self, list) :\n\n result = []\n for event in list :\n eventEntry = {}\n eventEntry ['id'] = link = event.answer.id\n eventEntry ['time'] = event.timeOf\n eventEntry ['comments'] = event.answer.comments\n eventEntry ['location'] = self.where (event.answer)\n eventEntry ['problem'] = event.answer.survey ['problem_type']\n eventEntry ['pictures'] = self.pic (Picture.objects.filter (answer__id = link))\n result.append (eventEntry)\n \n return result", "def get_sorted(self, collection, xmlFormat):\n\t\treturn {\n\t\t\t\"verb\": \"Search\",\n\t\t\t\"xmlFormat\": xmlFormat,\n\t\t\t'sortDescending' : '/text//itemRecord/metaMetadata/dateInfo/@lastModified',\n\t\t\t\"ky\": collection\n\t\t\t}", "def generate(self):\n if len(self.files) == 0:\n raise Exception('no files to process')\n music = []\n for filename in self.files:\n music.extend(self._process_file(filename))\n return self._extract_raw(sorted(music, key=lambda tup: (tup[0], tup[1])))", "def _get_current_media(self):\n key = int(self.status.content_id.split(\"/\")[-1])\n media_item = self.pms.fetchItem(key).reload()\n media_idx = self.status.media_custom_data.get(\"mediaIndex\", 0)\n part_idx = self.status.media_custom_data.get(\"partIndex\", 0)\n media = media_item.media[media_idx]\n part = media.parts[part_idx]\n\n return media_item, media, part", "def set_media_versions(self):\n\n # access to .multimedia_map is slow\n previous_version = self._get_version_comparison_build()\n prev_multimedia_map = previous_version.multimedia_map if previous_version else {}\n\n for path, map_item in self.multimedia_map.items():\n prev_map_item = prev_multimedia_map.get(path, None)\n if prev_map_item and prev_map_item.unique_id:\n # Re-use the id so CommCare knows it's the same resource\n map_item.unique_id = prev_map_item.unique_id\n if (prev_map_item and prev_map_item.version\n and prev_map_item.multimedia_id == map_item.multimedia_id):\n map_item.version = prev_map_item.version\n else:\n map_item.version = self.version", "def test_create_local_metadata(self):\n local_media = {\n 'path': 'test_mp4_short.mp4',\n 'title': 'Test media title',\n 'description': 'Test media description',\n }\n\n media_filename = \"%s/%s\" % (settings.get('base', 'path.local.media'), local_media['path'])\n\n self.model = Media.create(\n client=self.client,\n 
media_filename=media_filename,\n title=local_media['title'],\n description=local_media['description'],\n )\n\n media_item = Media.get(client=self.client, uuid=self.model.uuid)\n assert media_item.title == local_media['title']\n assert media_item.description == local_media['description']\n #TODO: assert creator is owner", "def test_list_namespaced_image_stream_tag(self):\n pass", "def media(self, request, *args, **kwargs):\n conversation = self.get_object()\n media_attachments = conversation.media_attachments\n self.pagination_class = ShoutitPageNumberPagination\n page = self.paginate_queryset(media_attachments)\n # Todo: Only keep the message attachments that were not deleted by this user\n serializer = MessageAttachmentSerializer(page, many=True, context={'request': request})\n return self.get_paginated_response(serializer.data)", "def get_queryset(self):\n return Picture.objects.all()", "def list_of(self):\r\n self.files = os.listdir(self.p)\r\n self.size = [0] * len(self.files)\r\n self.created = [0] * len(self.files)\r\n self.modified = [0] * len(self.files)\r\n total_size = 0\r\n iteration = 0\r\n for file in self.files:\r\n self.fol = os.path.join(self.p, file)\r\n self.modified[iteration] = time.ctime(os.path.getmtime(f\"{self.fol}\"))\r\n self.created[iteration] = time.ctime(os.path.getctime(f\"{self.fol}\"))\r\n for path, dirs, files in os.walk(self.fol):\r\n for fol in files:\r\n fpath = os.path.join(path, fol)\r\n total_size += os.path.getsize(fpath)\r\n self.size[iteration] = total_size\r\n iteration += 1\r\n return self.files, self.size, self.created, self.modified", "def resources(self):\r\n return self.page.object_list", "def get_queryset(self):\n return Item.objects.filter(owner=self.request.user).order_by('-created').prefetch_related('tags')", "def get_resources(self):\n return []", "def _topological_sort_metadata(self) -> Iterator[BaseTransaction]:\n raise NotImplementedError", "def archive_list(self) -> List[str]:\n bucket = self.client()\n results = []\n for item in bucket.objects.all():\n if (\n item.key.endswith(\".arcd\") or item.key.endswith(\".arcd.gpg\")\n ) and \"meta\" not in item.key:\n results.append(item.key.split(\".\", 1)[0])\n return results" ]
[ "0.61875653", "0.60358816", "0.59814835", "0.5946629", "0.5863695", "0.57058465", "0.5681557", "0.5633815", "0.55823237", "0.5550474", "0.5542779", "0.5542779", "0.5542779", "0.5542779", "0.5485966", "0.54624945", "0.544271", "0.5394488", "0.5365429", "0.5348334", "0.5256054", "0.52526796", "0.52368546", "0.5232301", "0.52296406", "0.52292305", "0.5228468", "0.5222687", "0.5209426", "0.51953876", "0.5184957", "0.51785696", "0.51671565", "0.5145136", "0.51360905", "0.51349443", "0.5128847", "0.51225984", "0.51189905", "0.51081276", "0.5096938", "0.5088466", "0.50845987", "0.50820106", "0.5079608", "0.5077443", "0.5076076", "0.5060208", "0.5052138", "0.50481594", "0.5034689", "0.5032782", "0.50284487", "0.50171053", "0.50089765", "0.4993411", "0.4990628", "0.49637273", "0.49568447", "0.49548167", "0.49491572", "0.49491572", "0.4948272", "0.49443898", "0.49407363", "0.49396116", "0.49353543", "0.49332735", "0.4931126", "0.49173492", "0.49042305", "0.48949307", "0.4893348", "0.48776677", "0.4875394", "0.48751324", "0.48716146", "0.48715943", "0.48678932", "0.4865552", "0.48506716", "0.48484418", "0.48462945", "0.48451513", "0.4842102", "0.4831779", "0.481671", "0.4816579", "0.48146164", "0.48134348", "0.48097306", "0.4809454", "0.480628", "0.47987488", "0.47926125", "0.47923362", "0.47922486", "0.4791903", "0.47915506", "0.4791108" ]
0.65957546
0
If a media resource has both an image and YouTube video ID specified then cleaning it should throw an error.
def test_clean_both_image_and_youtube_id(image): resource = models.MediaResource(image=image, youtube_id="dQw4w9WgXcQ") with pytest.raises(ValidationError): resource.clean()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_clean_no_image_or_youtube_id():\n resource = models.MediaResource()\n\n with pytest.raises(ValidationError):\n resource.clean()", "def test_clean_only_youtube_id():\n resource = models.MediaResource(youtube_id=\"dQw4w9WgXcQ\")\n\n resource.clean()", "def test_clean_only_image(image):\n resource = models.MediaResource(image=image)\n\n resource.clean()", "def verify_media(self):\n self.check_dataset_duplicate_ids(self.media)", "def test_video_delete(self):\n v1, v2 = make_video(media_id='1234'), make_video(media_id='2345')\n set_resources_and_sync([v1, v2])\n i1 = mpmodels.MediaItem.objects.get(jwp__key=v1.key)\n i2 = mpmodels.MediaItem.objects.get(jwp__key=v2.key)\n set_resources_and_sync([v1])\n self.assertIsNone(mpmodels.MediaItem.objects.get(id=i1.id).deleted_at)\n self.assertIsNotNone(mpmodels.MediaItem.objects_including_deleted.get(id=i2.id).deleted_at)\n self.assertFalse(mpmodels.MediaItem.objects.filter(id=i2.id).exists())", "def _handle_removed_media(self):\r\n if self.has_media():\r\n try:\r\n image = str(self.image)\r\n os.remove(image)\r\n except OSError:\r\n raise('Failure trying to remove image from filesystem.')\r\n return True", "def clean_video_id(self):\n failed = False\n d = self.cleaned_data\n service = d.get('service')\n # Get the video id and clear whitespace on either side.\n video_id = d.get('video_id', '').strip()\n\n # Validate using YouTube's API:\n if service == 'youtube':\n url = ('http://gdata.youtube.com/feeds/api/videos/{}?alt=json'.\n format(video_id))\n data = requests.get(url)\n # Ensure we can parse the JSON data.\n try:\n json = simplejson.loads(data.text)\n # If not, mark this as a failure.\n except ValueError:\n failed = True\n\n # Validate using Vimeo's API:\n elif service == 'vimeo':\n data = requests.get('http://vimeo.com/api/v2/video/{}.json'.\n format(video_id))\n # Ensure we can parse the JSON data.\n try:\n json = simplejson.loads(data.text)\n # If not, mark this as a failure.\n except ValueError:\n failed = True\n\n # Respond based on the outcome.\n if failed:\n message = _(\"Couldn't validate video id using {} API. 
Please \"\n \"verify it exists and check for \"\n \"typos.\".format(service))\n raise forms.ValidationError(message)\n\n return video_id", "def delete_video(self, video_ID): # WORKS\n try:\n self.cur.execute(\"DELETE FROM videos WHERE video_ID = \\\"{}\\\"\".format(video_ID))\n self.db.commit()\n os.remove('static/videos/' + str(video_ID) + '.mp4')\n os.remove('static/images/' + str(video_ID) + '.jpg')\n except:\n self.db.rollback()", "def test_parse_youtube_invalid(self):\r\n\r\n # invalid id\r\n youtube_str = 'thisisaninvalidid'\r\n output = VideoDescriptor._parse_youtube(youtube_str)\r\n self.assertEqual(output, {'0.75': '',\r\n '1.00': '',\r\n '1.25': '',\r\n '1.50': ''})\r\n # another invalid id\r\n youtube_str = ',::,:,,'\r\n output = VideoDescriptor._parse_youtube(youtube_str)\r\n self.assertEqual(output, {'0.75': '',\r\n '1.00': '',\r\n '1.25': '',\r\n '1.50': ''})\r\n\r\n # and another one, partially invalid\r\n youtube_str = '0.75_BAD!!!,1.0:AXdE34_U,1.25:KLHF9K_Y,1.5:VO3SxfeD,'\r\n output = VideoDescriptor._parse_youtube(youtube_str)\r\n self.assertEqual(output, {'0.75': '',\r\n '1.00': 'AXdE34_U',\r\n '1.25': 'KLHF9K_Y',\r\n '1.50': 'VO3SxfeD'})", "def test_empty_media(self):\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['interface']['inputs']['files'][0]['mediaTypes'] = []\n config = copy.deepcopy(self.configuration)\n json_data = {\n 'manifest': manifest,\n 'configuration': config\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n del manifest['job']['interface']['inputs']['files'][0]['mediaTypes']\n config = copy.deepcopy(self.configuration)\n json_data = {\n 'manifest': manifest,\n 'configuration': config\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})", "def test_no_video_image(self):\n edx_video_id = 'test1'\n get_videos_url = reverse_course_url('videos_handler', self.course.id)\n video_image_upload_url = self.get_url_for_course_key(self.course.id, {'edx_video_id': edx_video_id})\n with make_image_file(\n dimensions=(settings.VIDEO_IMAGE_MIN_WIDTH, settings.VIDEO_IMAGE_MIN_HEIGHT),\n ) as image_file:\n self.client.post(video_image_upload_url, {'file': image_file}, format='multipart')\n\n val_image_url = get_course_video_image_url(course_id=self.course.id, edx_video_id=edx_video_id)\n\n response = self.client.get_json(get_videos_url)\n self.assertEqual(response.status_code, 200)\n response_videos = json.loads(response.content.decode('utf-8'))[\"videos\"]\n for response_video in response_videos:\n if response_video['edx_video_id'] == edx_video_id:\n self.assertEqual(response_video['course_video_image_url'], val_image_url)\n else:\n self.assertEqual(response_video['course_video_image_url'], None)", "def _sanitize_resources(cls, resources):\n\n try:\n for resource in cls._loop_raw(resources):\n 
cls._sanitize_resource(resource)\n except (KeyError, TypeError):\n _LOGGER.debug(\"no shade data available\")\n return None", "def test_type_youtube():\n resource = models.MediaResource(youtube_id=\"dQw4w9WgXcQ\")\n\n assert resource.type == models.MediaResource.TYPE_YOUTUBE", "def test_recreate_deleted_item(self):\n v1 = make_video(media_id='1234', title='testing')\n set_resources_and_sync([v1])\n i1 = mpmodels.MediaItem.objects.filter(jwp__key=v1.key).first()\n self.assertIsNotNone(i1)\n self.assertEqual(i1.title, 'testing')\n i1.delete()\n\n set_resources_and_sync([v1])\n i1 = mpmodels.MediaItem.objects.filter(jwp__key=v1.key).first()\n self.assertIsNotNone(i1)\n self.assertEqual(i1.title, 'testing')", "def test_video_removal(self):\n edx_video_id = 'test1'\n remove_url = self.get_url_for_course_key(self.course.id, {'edx_video_id': edx_video_id})\n response = self.client.delete(remove_url, HTTP_ACCEPT=\"application/json\")\n self.assertEqual(response.status_code, 204)\n\n self._assert_video_removal(self.url, edx_video_id, 1)", "def test_only_sms_created(self):\n v1, v2 = make_video(media_id='1234'), make_video()\n set_resources_and_sync([v1, v2])\n i1 = mpmodels.MediaItem.objects.filter(jwp__key=v1.key).first()\n self.assertIsNotNone(i1)\n i2 = mpmodels.MediaItem.objects.filter(jwp__key=v2.key).first()\n self.assertIsNone(i2)", "def test_video_image_validation_message(self, image_data, error_message):\n edx_video_id = 'test1'\n video_image_upload_url = self.get_url_for_course_key(self.course.id, {'edx_video_id': edx_video_id})\n with make_image_file(\n dimensions=(\n image_data.get('width', settings.VIDEO_IMAGE_MIN_WIDTH),\n image_data.get('height', settings.VIDEO_IMAGE_MIN_HEIGHT)\n ),\n prefix=image_data.get('prefix', 'videoimage'),\n extension=image_data.get('extension', '.png'),\n force_size=image_data.get('size', settings.VIDEO_IMAGE_SETTINGS['VIDEO_IMAGE_MIN_BYTES'])\n ) as image_file:\n response = self.client.post(video_image_upload_url, {'file': image_file}, format='multipart')\n if error_message:\n self.verify_error_message(response, error_message)\n else:\n self.verify_image_upload_reponse(self.course.id, edx_video_id, response)", "def test_str_no_title(media_resource_factory):\n resource = media_resource_factory()\n\n assert str(resource) == str(resource.id)", "def test_upload_image_bad_request(self):\n url = image_upload_url(self.movie.id)\n res = self.client.post(url, {'image': 'notimage'}, format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def clean(self):\n self.clean_rally_conf()\n rally.RallyBase.clean_rally_logs()\n if self.image_alt:\n self.cloud.delete_image(self.image_alt)\n if self.flavor_alt:\n self.orig_cloud.delete_flavor(self.flavor_alt.id)\n super().clean()", "def prepare_media(self, object):\n if object.media is not None:\n #return object.media.media_file.name\n return '/api/v1/media/{0}/'.format(object.media.id)\n else:\n return ''", "def clear_images(self):\r\n\r\n with translate_errors():\r\n self.audio.clear_pictures()\r\n self.audio.save()\r\n\r\n super().clear_images()", "def get_clean_image(image):\n if not image:\n return \"\"\n if \"music@\" in image:\n # fix for embedded images\n thumbcache = xbmc.getCacheThumbName(image).replace(\".tbn\", \".jpg\")\n thumbcache = \"special://thumbnails/%s/%s\" % (thumbcache[0], thumbcache)\n if not xbmcvfs.exists(thumbcache):\n xbmcvfs.copy(image, thumbcache)\n image = thumbcache\n if image and \"image://\" in image:\n image = image.replace(\"image://\", \"\")\n image = 
urllib.unquote(image.encode(\"utf-8\"))\n if image.endswith(\"/\"):\n image = image[:-1]\n if not isinstance(image, unicode):\n image = image.decode(\"utf8\")\n return image", "def delete(self, mediaId):\n headers = { 'Authorization' : self.client.authorization_header }\n\n response = requests.delete(\n self.client.url + '/media/' + mediaId,\n headers = headers\n )\n\n #print 'Response: ' + response.text\n return json.loads(response.text)", "def test_image_no_requiere_del_campos_description(self):\n self.image_obj.description = ''\n\n # Si no hay error, todo OK.\n self.image_obj.save()", "def test_invalidate_removed_resource(self):\n workflow1 = self.get_workflow(\n \"\"\"file://file2 <- file://file1\n\nfile://file3 <- file://file1\n\"\"\")\n workflow2 = self.get_workflow(\n \"\"\"file://file3 <- file://file1\n\"\"\")\n\n invalid = workflow1.resources_not_created_the_same_way(workflow2)\n assert len(invalid) == 1, [(res.url, reason._reason) for (res, reason,) in invalid]\n (resource, invalidation_reason) = invalid[0]\n assert resource.url == \"file://file2\"\n assert invalidation_reason == NO_LONGER_CREATED, invalidation_reason", "def extract_media_v1(data):\n user = data[\"user\"]\n location = data.get(\"location\")\n if location:\n location = {\"pk\": int(location.get(\"pk\")), \"name\": location.get(\"name\")}\n video_url = \"\"\n if \"video_versions\" in data:\n # Select Best Quality by Resolutiuon\n video_url = sorted(\n data[\"video_versions\"], key=lambda o: o[\"height\"] * o[\"width\"]\n ).pop()[\"url\"]\n product_type = data.get(\"product_type\", \"\")\n if data[\"media_type\"] == 2 and not product_type:\n product_type = \"feed\"\n thumbnail_url = ''\n if 'image_versions2' in data:\n thumbnail_url = sorted(\n data[\"image_versions2\"][\"candidates\"],\n key=lambda o: o[\"height\"] * o[\"width\"],\n ).pop()[\"url\"]\n return {\n \"pk\": int(data[\"pk\"]),\n \"taken_at\": int(data[\"taken_at\"]),\n \"id\": data[\"id\"],\n \"media_type\": data[\"media_type\"],\n \"product_type\": product_type,\n \"code\": data[\"code\"],\n \"thumbnail_url\": thumbnail_url,\n \"location\": location,\n \"user\": extract_user_short(user),\n \"comment_count\": int(data.get(\"comment_count\") or 0),\n \"like_count\": int(data.get(\"like_count\") or 0), # the media just published has no like_count\n \"caption_text\": json_value(data, \"caption\", \"text\", default=\"\"),\n \"usertags\": [\n extract_usertag(usertag)\n for usertag in data.get(\"usertags\", {}).get(\"in\", [])\n ],\n \"video_url\": video_url,\n \"view_count\": int(data.get('view_count') or 0),\n \"video_duration\": data.get('video_duration'),\n \"title\": data.get(\"title\") or None,\n \"resources\": [\n extract_resource_v1(edge)\n for edge in data.get('carousel_media', [])\n ]\n }", "async def _apply_media_retention_rules(self) -> None:\n # Purge remote media\n if self._media_retention_remote_media_lifetime_ms is not None:\n # Calculate a threshold timestamp derived from the configured lifetime. 
Any\n # media that has not been accessed since this timestamp will be removed.\n remote_media_threshold_timestamp_ms = (\n self.clock.time_msec() - self._media_retention_remote_media_lifetime_ms\n )\n\n logger.info(\n \"Purging remote media last accessed before\"\n f\" {remote_media_threshold_timestamp_ms}\"\n )\n\n await self.delete_old_remote_media(\n before_ts=remote_media_threshold_timestamp_ms\n )\n\n # And now do the same for local media\n if self._media_retention_local_media_lifetime_ms is not None:\n # This works the same as the remote media threshold\n local_media_threshold_timestamp_ms = (\n self.clock.time_msec() - self._media_retention_local_media_lifetime_ms\n )\n\n logger.info(\n \"Purging local media last accessed before\"\n f\" {local_media_threshold_timestamp_ms}\"\n )\n\n await self.delete_old_local_media(\n before_ts=local_media_threshold_timestamp_ms,\n keep_profiles=True,\n delete_quarantined_media=False,\n delete_protected_media=False,\n )", "def delete_thumbnail(self, thumbnail_name):", "def __sanitize_input(self):\n self.__check_for_video_file()\n self.__manage_output_folder()", "def test_delete_with_bad_id(self):\n resp = self.api_client.delete('/api/metadata/tracks/100000/')\n data = json.loads(resp.content)\n\n # Ensure the request filed with a 404, and an error message is returned\n self.assertEqual(resp.status_code, 404)\n self.assertEqual(data['detail'], u'The record could not be found.')", "def test_thumbnail(self):\n pub = PublicationFactory(thumbnail__filename=\"tester.jpg\")\n self.assertEqual(\n pub.thumbnail.url, f\"/media/reading/publications/{pub.slug}/tester.jpg\"\n )\n self.assertTrue(\n pub.thumbnail.path.endswith, f\"/reading/publications/{pub.slug}/tester.jpg\"\n )\n\n # Tidy up:\n pub.thumbnail.delete()", "def users_video_delete(self):\n user_email = request.args.get('email')\n video_title = request.args.get('video_title')\n email_token = auth.current_user()[0]\n if not video_title or not user_email:\n self.logger.debug((messages.MISSING_FIELDS_ERROR % \"video_title or user_email\"))\n return messages.ERROR_JSON % \"video_title or user_email\", 400\n if user_email != email_token and not self.auth_server.profile_query(email_token)[\"admin\"]:\n self.logger.debug(messages.USER_NOT_AUTHORIZED_ERROR)\n return messages.ERROR_JSON % messages.USER_NOT_AUTHORIZED_ERROR, 403\n try:\n self.media_server.delete_video(user_email, video_title)\n except UnexistentVideoError:\n self.logger.debug((messages.UNEXISTENT_VIDEO_ERROR % (video_title, email_token)))\n return messages.UNEXISTENT_VIDEO_ERROR % (video_title, email_token), 404\n self.video_database.delete_video(user_email, video_title)\n return messages.SUCCESS_JSON, 200", "def validate_and_insert(self, video_details: dict):\n video_id = video_details.get('id', {}).get('videoId')\n\n if not video_id or self.if_already_present(video_id):\n return\n\n try:\n Video.objects.create(\n etag=video_details['etag'],\n video_id=video_id,\n thumbnails=video_details['snippet']['thumbnails'],\n title=video_details['snippet']['title'],\n description=video_details['snippet']['description'],\n published_at=video_details['snippet']['publishTime']\n )\n except (AttributeError, IntegrityError, ValueError):\n return", "def test_api_video_with_a_thumbnail(self):\n video = factories.VideoFactory(\n pk=\"38a91911-9aee-41e2-94dd-573abda6f48f\",\n uploaded_on=datetime(2018, 8, 8, tzinfo=pytz.utc),\n upload_state=\"ready\",\n resolutions=[144, 240, 480, 720, 1080],\n )\n thumbnail = factories.ThumbnailFactory(\n video=video,\n 
uploaded_on=datetime(2018, 8, 8, tzinfo=pytz.utc),\n upload_state=\"ready\",\n )\n\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # Get the video linked to the JWT token\n response = self.client.get(\n f\"/api/videos/{video.id}/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content)\n\n self.assertEqual(\n content[\"thumbnail\"],\n {\n \"active_stamp\": \"1533686400\",\n \"id\": str(thumbnail.id),\n \"is_ready_to_show\": True,\n \"upload_state\": \"ready\",\n \"urls\": {\n \"144\": \"https://abc.cloudfront.net/38a91911-9aee-41e2-94dd-573abda6f48f/\"\n \"thumbnails/1533686400_144.jpg\",\n \"240\": \"https://abc.cloudfront.net/38a91911-9aee-41e2-94dd-573abda6f48f/\"\n \"thumbnails/1533686400_240.jpg\",\n \"480\": \"https://abc.cloudfront.net/38a91911-9aee-41e2-94dd-573abda6f48f/\"\n \"thumbnails/1533686400_480.jpg\",\n \"720\": \"https://abc.cloudfront.net/38a91911-9aee-41e2-94dd-573abda6f48f/\"\n \"thumbnails/1533686400_720.jpg\",\n \"1080\": \"https://abc.cloudfront.net/38a91911-9aee-41e2-94dd-573abda6f48f/\"\n \"thumbnails/1533686400_1080.jpg\",\n },\n \"video\": str(video.id),\n },\n )\n\n self.assertEqual(\n content[\"urls\"][\"thumbnails\"],\n {\n \"144\": \"https://abc.cloudfront.net/38a91911-9aee-41e2-94dd-573abda6f48f/\"\n \"thumbnails/1533686400_144.jpg\",\n \"240\": \"https://abc.cloudfront.net/38a91911-9aee-41e2-94dd-573abda6f48f/\"\n \"thumbnails/1533686400_240.jpg\",\n \"480\": \"https://abc.cloudfront.net/38a91911-9aee-41e2-94dd-573abda6f48f/\"\n \"thumbnails/1533686400_480.jpg\",\n \"720\": \"https://abc.cloudfront.net/38a91911-9aee-41e2-94dd-573abda6f48f/\"\n \"thumbnails/1533686400_720.jpg\",\n \"1080\": \"https://abc.cloudfront.net/38a91911-9aee-41e2-94dd-573abda6f48f/\"\n \"thumbnails/1533686400_1080.jpg\",\n },\n )", "def test_video_image(self):\n edx_video_id = 'test1'\n video_image_upload_url = self.get_url_for_course_key(self.course.id, {'edx_video_id': edx_video_id})\n with make_image_file(\n dimensions=(settings.VIDEO_IMAGE_MIN_WIDTH, settings.VIDEO_IMAGE_MIN_HEIGHT),\n ) as image_file:\n response = self.client.post(video_image_upload_url, {'file': image_file}, format='multipart')\n image_url1 = self.verify_image_upload_reponse(self.course.id, edx_video_id, response)\n\n # upload again to verify that new image is uploaded successfully\n with make_image_file(\n dimensions=(settings.VIDEO_IMAGE_MIN_WIDTH, settings.VIDEO_IMAGE_MIN_HEIGHT),\n ) as image_file:\n response = self.client.post(video_image_upload_url, {'file': image_file}, format='multipart')\n image_url2 = self.verify_image_upload_reponse(self.course.id, edx_video_id, response)\n\n self.assertNotEqual(image_url1, image_url2)", "def mex_validation(resource):\n resource_name = [n for n in list(resource._fields) if getattr(resource,n) != '']\n for name in list(resource_name):\n url = getattr(resource,name)\n log.debug(\"resource: %s\" % url)\n try:\n o = urlparse.urlsplit(url)\n url_path = o.path\n log.debug('url_path :%s' % url_path)\n m = re.match('\\/(?P<service>[\\w-]+)\\/(image[s]?\\/|)(?P<id>[\\w-]+)', url_path)\n if m is not None:\n if m.group('service') == 'image_service' or m.group('service') == 'data_service': #check for data_service\n if 'pixels' not in url_path: #if false requires a redirect\n ident = m.group('id') #seaching a plan 
image_service or data_service url\n if check_access(ident) is True:\n continue #check next resource\n\n# # Try to route internally through bisque\n# resp = request_internally(url)\n# if resp.status_int < 400:\n# if resp.status_int == 302:\n# #reset the url to the redirected url\n# redirect_url = resp.headers.get('Location')\n# if redirect_url is not None: #did not find the redirect\n# log.debug('Redirect Url: %s' % redirect_url)\n# resource = resource._replace(**{name:redirect_url})\n# continue\n# else:\n# continue\n\n # Try to route externally\n resp = request_externally(url)\n if resp.status_code < 400:\n if resp.status_code == 302:\n #reset the url to the redirected url\n redirect_url = resp.headers.get('Location')\n if redirect_url is not None: #did not find the redirect\n log.debug('Redirect Url: %s' % redirect_url)\n resource = resource._replace(**{name:redirect_url})\n continue\n else:\n continue\n\n raise InvalidResourceError(resource_url=url, error_code=403, error_message='Resource: %s Not Found' % url)\n\n except StandardError:\n log.exception (\"While retrieving URL %s\" %str(resource))\n raise InvalidResourceError(resource_url=url, error_code=403, error_message='Resource: %s Not Found' % url)\n\n return resource", "def test_video_image_no_file(self):\n video_image_upload_url = self.get_url_for_course_key(self.course.id, {'edx_video_id': 'test1'})\n response = self.client.post(video_image_upload_url, {})\n self.verify_error_message(response, 'An image file is required.')", "def photo_file_cleanup(sender, **kwargs):\n instance = kwargs.get('instance')\n filename = instance.path.url[1:]\n if os.path.exists(filename):\n os.remove(filename)", "def auto_delete_image_and_thumbnail_on_delete(sender, instance, **kwargs):\n if instance.image:\n if os.path.isfile(instance.image.path):\n os.remove(instance.image.path)\n\n if instance.thumbnail:\n if os.path.isfile(instance.thumbnail.path):\n os.remove(instance.thumbnail.path)\n\n return False", "def remove_thumbnail(inJSON):\n time.sleep(2)\n consoleOutput = exec_console_command(\"rm \" + inJSON + \";\" + constants.getExitStatus)\n\n if \"\\n1\" in consoleOutput:\n raise IOError(\"Thumbnail file doesn't exist to delete. 
No worries though, it was going to be deleted anyway!\")\n\n return 0", "def test_deletion_of_user_photo_succeeds(self):\n\t\tself.name = 'media.png'\n\t\tself.image = File(open('static/img/media.png', 'rb'))\n\t\tself.created_image = UserPhoto(image=self.image, name=self.name, created_by=self.user)\n\t\tself.created_image.save()\t\t\t\n\t\tresponse = self.client.delete('/api/modify_photo/?id={}'.format(self.created_image.id))\n\t\tself.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def delete_test_image(image_field):\n warnings.warn(DeprecationWarning(\n \"delete_test_image() is deprecated in favour of the \"\n \"get_sample_image() context manager.\"), stacklevel=2)\n # ensure all thumbs are deleted\n for filename in glob.glob(\n os.path.join(\n settings.MEDIA_ROOT, 'thumbs', image_field.name.split('/')[-1]\n ) + '*'\n ):\n os.unlink(filename)\n # delete the saved file\n image_field.delete()", "def prepare_media_url(self, object):\n if object.media is not None:\n return os.path.join(settings.MEDIA_URL, object.media.media_file.name)\n else:\n return ''", "def photo_edit_file_cleanup(sender, **kwargs):\n instance = kwargs.get('instance')\n filename = instance.upload.url[1:]\n if os.path.exists(filename):\n os.remove(filename)", "def test_video_thumbnail(self):\n data = TestData().load_users().load_galleries()\n album = data.gallery_b7w.top_album\n\n video1 = Video.objects.create(album=album, uid='56433514', type=Video.VIMIO)\n VideoController.thumbnail_url(video1)\n self.assertHttpOk(VideoController.thumbnail_url(video1))\n\n video2 = Video.objects.create(album=album, uid='7dGGPlZlPQw', type=Video.YOUTUBE)\n self.assertHttpOk(VideoController.thumbnail_url(video2))", "def reject(request, img_id):\n if not request.user.is_staff:\n return HttpResponseRedirect(reverse('wainz.views.composite'))\n else:\n img = Image.objects.get(id=img_id)\n os.remove(os.path.join(settings.STATIC_ROOT, \"uploaded-images\", \"%s.%s\" % (img.image_path, img.extension)))\n os.remove(os.path.join(settings.STATIC_ROOT, \"uploaded-images\", \"%s-thumb.%s\" % (img.image_path, img.extension)))\n img.delete()\n return HttpResponseRedirect(reverse('wainz.views.approve_images'))", "def test_delete_namespaced_image_stream_tag(self):\n pass", "def remove_base64(examples: List[TaskType]) -> List[TaskType]:\n for eg in examples:\n if \"audio\" in eg and eg[\"audio\"].startswith(\"data:\") and \"path\" in eg:\n eg[\"audio\"] = eg[\"path\"]\n if \"video\" in eg and eg[\"video\"].startswith(\"data:\") and \"path\" in eg:\n eg[\"video\"] = eg[\"path\"]\n return examples", "def test_video_removal_multiple_courses(self):\n # remove video from course1\n edx_video_id = 'test1'\n remove_url = self.get_url_for_course_key(self.course.id, {'edx_video_id': edx_video_id})\n response = self.client.delete(remove_url, HTTP_ACCEPT=\"application/json\")\n self.assertEqual(response.status_code, 204)\n\n # verify that video is only deleted from course1 only\n self._assert_video_removal(self.url, edx_video_id, 1)\n self._assert_video_removal(self.get_url_for_course_key(self.course2.id), edx_video_id, 0)", "def updateMedia(request, form, update, m, mt, expires, members, vendors, employees, contractors, filesize):\n\tis360 = False\n\tif 'is_360' in request.POST:\n\t\tis360 = request.POST['is_360']\n\tif update:\n\t\tif m.user == request.user.username or request.user.is_superuser: #need to make sure id is not spoofed\n\t\t\ttry:\n\t\t\t\tm = 
Media.objects.filter(id=request.POST['formid'])\n\t\t\t\tm.update(name=request.POST['name'].encode('utf-8'), short_description=request.POST['name'].encode('utf-8'),\n\t\t\t\t\t\t description=request.POST['description'].encode('utf-8'), expires=expires,\n\t\t\t\t\t\t retention=request.POST['retention'], visibility=request.POST['privacy'],\n\t\t\t\t\t\t members=members, vendors=vendors, employees=employees, contractors=contractors,is_360=is360)\n\t\t\t\tm = m.get()\n\t\t\t\tprocessTags(request, m, form, update)\n\t\t\t\tprocessCategories(request, m, form, update)\n\t\t\t\treturn m\n\t\t\texcept ObjectDoesNotExist:\n\t\t\t\tpass\n\t\telse:\n\t\t\tsys.exit()\n\telse:\n\t\tm = Media(name=request.POST['name'].encode('utf-8'), short_description=request.POST['name'].encode('utf-8'),\n\t\t\t\t description=request.POST['description'].encode('utf-8'), expires=expires, retention=request.POST['retention'],\n\t\t\t\t upload_date=datetime.datetime.now(), visibility=request.POST['privacy'],\n\t\t\t\t user=request.user.username, views=0, mediatype=mt, filesize=filesize, members=members,\n\t\t\t\t vendors=vendors, employees=employees, contractors=contractors, file=request.FILES['file'],\n\t\t\t\t duration='0',is_360=is360)\n\t\tm.save()\n\t\tprocessTags(request, m, form, update)\n\t\tprocessCategories(request, m, form, update)\n\t\treturn m", "def test_video_image_upload_disabled(self):\n video_image_upload_url = self.get_url_for_course_key(self.course.id, {'edx_video_id': 'test_vid_id'})\n response = self.client.post(video_image_upload_url, {'file': 'dummy_file'}, format='multipart')\n self.assertEqual(response.status_code, 404)", "def delete(self, video_id, subvideo_name):\n\n video = Video.query.get(video_id)\n if not video:\n return {'message': 'video entry not exist'}, http.HTTPStatus.NOT_FOUND\n videofile = VideoFile.query.filter_by(name=subvideo_name).first()\n if videofile:\n videofile.delete()\n else:\n return {'message': 'no related video file'}, http.HTTPStatus.NOT_FOUND\n\n return {'message': 'delete success'}, http.HTTPStatus.NO_CONTENT", "def test_rebuilt_server_image_field(self):\n actual_image_id = self.server.image.id if self.server.image is not None else None\n self.assertEqual(self.expected_image_id, actual_image_id)", "async def _remove_local_media_from_disk(\n self, media_ids: List[str]\n ) -> Tuple[List[str], int]:\n removed_media = []\n for media_id in media_ids:\n logger.info(\"Deleting media with ID '%s'\", media_id)\n full_path = self.filepaths.local_media_filepath(media_id)\n try:\n os.remove(full_path)\n except OSError as e:\n logger.warning(\"Failed to remove file: %r: %s\", full_path, e)\n if e.errno == errno.ENOENT:\n pass\n else:\n continue\n\n thumbnail_dir = self.filepaths.local_media_thumbnail_dir(media_id)\n shutil.rmtree(thumbnail_dir, ignore_errors=True)\n\n await self.store.delete_remote_media(self.server_name, media_id)\n\n await self.store.delete_url_cache((media_id,))\n await self.store.delete_url_cache_media((media_id,))\n\n removed_media.append(media_id)\n\n return removed_media, len(removed_media)", "def delete_media():\n return Response(\"{}\", status=200, mimetype='application/json')\n message_status = request.form[\"MessageStatus\"]\n\n if (message_status == 'delivered'):\n MessageSid = request.form[\"MessageSid\"]\n MediaSid = request.form[\"MediaUrl0\"].split('/')[-1]\n delete_media_file(MessageSid, MediaSid)", "def reject_image(image_id, reason, comment):\n session = Database().Session()\n tkp.db.quality.reject(image_id, reason, comment,session)\n 
session.commit()", "def check(self):\n #\n # *****************\n # *****************\n # TODO: Check really if video is valid\n # *****************\n # *****************\n return True", "def test_delete_namespaced_image_stream(self):\n pass", "def prune_non_ascii_media_from_db():\n\n try:\n db = database.SessionLocal()\n media_list = crud.get_all_media(db=db)\n pbar_media_list = tqdm(media_list)\n pbar_media_list.set_description('Finding non-ASCII in titles')\n\n for media in pbar_media_list:\n if media.genres.__contains__('Animation') or len(media.genres) == 0:\n\n for letter in media.original_title:\n try:\n letter.encode(encoding='utf-8').decode('ascii')\n except UnicodeDecodeError:\n crud.delete_media_by_id(db=db, media_id=media.id)\n break\n print('Media with non-ASCII titles have been pruned')\n except Exception as e:\n print(e)", "def pre_provider_attachment_delete(self, resource_id):\n pass", "def create_video_thumbnail(self, object_id):\n\n logger.debug(\"Generating thumbnail for video #{}.\".format(object_id))\n\n try:\n video = Video.objects.get(id=object_id)\n except Video.DoesNotExist:\n logger.error(\"Video #{} does not exist.\".format(object_id))\n return\n\n image_path = None\n\n if video.is_image():\n image_path = video.video_file.path\n elif video.is_file():\n try:\n image_file = render_video(video.video_file.path)\n image_path = image_file.name\n except ConversionError as e:\n logger.debug(\"Could not convert video #{}: {}.\"\n .format(object_id, e))\n\n try:\n if image_path:\n assert isfile(image_path) and access(image_path, R_OK) and os.stat(image_path).st_size > 0\n except (AssertionError, TypeError):\n image_path = None\n\n if not image_path:\n image_path = video.find_placeholder()\n\n if not image_path or not os.path.exists(image_path):\n logger.debug(\"Could not find placeholder for video #{}\"\n .format(object_id))\n return\n\n thumbnail_content = None\n try:\n thumbnail_content = generate_thumbnail_content(image_path)\n except MissingPILError:\n logger.error('Pillow not installed, could not generate thumbnail.')\n return\n\n if not thumbnail_content:\n logger.warning(\"Thumbnail for video #{} empty.\".format(object_id))\n filename = 'video-{}-thumb.png'.format(video.uuid)\n video.save_thumbnail(filename, thumbnail_content)\n logger.debug(\"Thumbnail for video #{} created.\".format(object_id))", "def test_requestMalformedAvatarId(self):\n d = self._requestAvatarId(\n UsernamePassword(self.localpart, self.password))\n return self.assertFailure(d, errors.MissingDomainPart)", "def test_media_attribute_blows_up():\n with pytest.raises(AssertionError):\n MediaBag().media", "def test_delete_collection_namespaced_image_stream(self):\n pass", "def clean(context):\n print(f\"Attempting to forcefully remove image {IMAGE_NAME}:{IMAGE_VER}\")\n context.run(f\"docker rmi {IMAGE_NAME}:{IMAGE_VER} --force\")\n print(f\"Successfully removed image {IMAGE_NAME}:{IMAGE_VER}\")", "def remove_media(media, window=None, gui_instance=None):\r\n\r\n cursor = connection.cursor()\r\n\r\n if media.isnumeric(): # CLI-only: The user has attempted to delete the media file based on its ID in the database\r\n cursor.execute(\"SELECT full_path FROM media WHERE id = \" + media)\r\n\r\n full_path = cursor.fetchone()\r\n\r\n if full_path is None: # The system couldn't find the specified ID\r\n print(\"Error: The specified ID does not exist in the database.\")\r\n return\r\n\r\n # Attempting to remove the media file record from the database\r\n try:\r\n cursor.execute(\"DELETE FROM media WHERE id = \" 
+ media) # Deleting the record from the database\r\n\r\n connection.commit() # Writing the changes to the database\r\n\r\n except Error: # Database is locked\r\n print(\"\\nError when trying to commit changes to database. Make sure another application is not using the \"\r\n \"database.\")\r\n\r\n return False\r\n\r\n cursor.close()\r\n\r\n # Attempting to re-order the keys after the deleted one\r\n if not resort_keys(media): # Fatal error: database is locked\r\n print(\"\\nERROR: DATABASE COULD NOT BE UPDATED. APPLICATION CANNOT WORK AS INTENDED. \"\r\n \"PLEASE MANUALLY REMOVE ALL MEDIA FILES FROM THE MEDIA FOLDER AND TRY ADDING THEM BACK.\")\r\n sys.exit() # Quitting; the application will malfunction until the user manually resets the media folder\r\n\r\n try:\r\n os.remove(full_path[0].replace(\"\\\\\", \"/\")) # Removes the media file from the media folder\r\n\r\n except FileNotFoundError:\r\n print(\"\\nError: Could not remove the file from the media folder: The file does not exist.\")\r\n return False\r\n\r\n except PermissionError:\r\n print(\"\\nError: Unable to remove file from the media folder. Make sure you haven't selected a \"\r\n \"write-protected folder. If the issue persists, try changing the media folder and manually removing\"\r\n \" the media file from the current media folder.\")\r\n return False\r\n\r\n print(\"\\nThe media file has been removed.\")\r\n\r\n else: # The user is either using the GUI or has provided the filename as parameter\r\n # Getting the full path of the file (using an app-level convention for slashes)\r\n full_path = os.path.join(media_folder, os.path.basename(media)).replace(\"\\\\\", \"/\")\r\n\r\n if path.exists(full_path): # (CLI-only) Checking if the provided filename exists\r\n\r\n # Getting the id of the media which will be removed in order to re-order the IDs of the database\r\n cursor.execute(\"SELECT id FROM media WHERE full_path = \" + \"\\\"\" + full_path + \"\\\"\")\r\n id_value = cursor.fetchone()\r\n\r\n # Attempting to remove the media file record from the database\r\n try:\r\n cursor.execute(\"DELETE FROM media WHERE full_path = \" + \"\\\"\" + full_path + \"\\\"\")\r\n\r\n connection.commit() # Writing the changes to the database\r\n\r\n except Error: # Database is locked\r\n # Application is running in GUI-mode\r\n if gui_instance is not None:\r\n messagebox.showerror(\"Database is locked\", \"Error when trying to commit changes to database. Make \"\r\n \"sure another application is not using the database.\")\r\n\r\n # Application is running in CLI or debugging mode\r\n if config_var['RUN-MODE']['run_mode'] == \"1\" or config_var['RUN-MODE']['run_mode'] == \"2\":\r\n print(\"\\nError when trying to commit changes to database. Make sure another application is not \"\r\n \"using the database.\")\r\n\r\n return False\r\n\r\n cursor.close()\r\n\r\n # Attempting to re-order the keys after the deleted one\r\n if not resort_keys(id_value[0]): # Fatal error: database is locked\r\n # Application is running in GUI-mode\r\n if gui_instance is not None:\r\n messagebox.showerror(\"Database error\", \"DATABASE COULD NOT BE UPDATED. APPLICATION CANNOT WORK AS \"\r\n \"INTENDED. 
PLEASE MANUALLY REMOVE ALL MEDIA FILES FROM THE MEDIA FOLDER AND \"\r\n \"TRY ADDING THEM BACK.\")\r\n # Quitting; the application will malfunction until the user manually resets the media folder\r\n sys.exit()\r\n\r\n # Application is running in CLI or debugging mode\r\n if config_var['RUN-MODE']['run_mode'] == \"1\" or config_var['RUN-MODE']['run_mode'] == \"2\":\r\n print(\"\\nERROR: DATABASE COULD NOT BE UPDATED. APPLICATION CANNOT WORK AS INTENDED. \"\r\n \"PLEASE MANUALLY REMOVE ALL MEDIA FILES FROM THE MEDIA FOLDER AND TRY ADDING THEM BACK.\")\r\n # Quitting; the application will malfunction until the user manually resets the media folder\r\n sys.exit()\r\n\r\n try:\r\n os.remove(full_path) # Removes the media file from the media folder\r\n\r\n except FileNotFoundError:\r\n # Application is running in GUI-mode\r\n if gui_instance is not None:\r\n messagebox.showerror(\"File not found\", \"The file does not exist.\")\r\n\r\n # Application is running in CLI or debugging mode\r\n if config_var['RUN-MODE']['run_mode'] == \"1\" or config_var['RUN-MODE']['run_mode'] == \"2\":\r\n print(\"\\nError: Could not remove the file from the media folder: The file does not exist.\")\r\n\r\n return False\r\n\r\n except PermissionError:\r\n # Application is running in GUI-mode\r\n if gui_instance is not None:\r\n messagebox.showerror(\"Unable to remove file\", \"Unable to remove file from the media folder. Make \"\r\n \"sure you haven't selected a write-protected folder. If the issue persists, \"\r\n \"try changing the media folder and manually removing the media file from the \"\r\n \"current media folder.\")\r\n\r\n # Application is running in CLI or debugging mode\r\n if config_var['RUN-MODE']['run_mode'] == \"1\" or config_var['RUN-MODE']['run_mode'] == \"2\":\r\n print(\"\\nError: Unable to remove file from the media folder. Make sure you haven't selected a \"\r\n \"write-protected folder. 
If the issue persists, try changing the media folder and manually \"\r\n \"removing the media file from the current media folder.\")\r\n\r\n return False\r\n\r\n if gui_instance is not None: # The method has been fired by a GUI widget\r\n window.destroy() # Closes the removal window\r\n\r\n # Reloading the media list of the root window\r\n gui_instance.library_items = []\r\n gui_instance.path_frame_parent.destroy()\r\n gui_instance.display_media()\r\n\r\n else: # The method has been fired by using CLI\r\n print(\"\\nThe media file has been removed.\")\r\n\r\n else: # (CLI-only) The user has provided an invalid filename\r\n print(\"\\nError: The specified media file does not exist.\")\r\n return False\r\n\r\n return True", "def _assert_video_removal(self, url, edx_video_id, deleted_videos):\n response = self.client.get_json(url)\n self.assertEqual(response.status_code, 200)\n response_videos = json.loads(response.content.decode('utf-8'))[\"videos\"]\n self.assertEqual(len(response_videos), len(self.previous_uploads) - deleted_videos)\n\n if deleted_videos:\n self.assertNotIn(edx_video_id, [video.get('edx_video_id') for video in response_videos])\n else:\n self.assertIn(edx_video_id, [video.get('edx_video_id') for video in response_videos])", "def delete_video(event_id, video_id):\n event = Event.query.get_or_404(event_id)\n if not current_user.is_organizer(event) and not current_user.is_administrator():\n return redirect(url_for(\"main.index\"))\n video = Video.query.get_or_404(video_id)\n db.session.delete(video)\n db.session.commit()\n flash(\"Your video has been deleted.\", \"success\")\n return redirect(url_for(\"events.media\", id=event_id))", "def test_get_video_id_from_url(self):\n self.assertEqual(get_video_id_from_url('https://www.youtube.com/embed/DqGwxR_0d1M'),\n 'DqGwxR_0d1M')\n self.assertEqual(get_video_id_from_url('https://youtu.be/DqGwxR_0d1M'), 'DqGwxR_0d1M')\n self.assertEqual(\n get_video_id_from_url('https://www.youtube.com/watch?v=DqGwxR_0d1M&feature=youtu.be'),\n 'DqGwxR_0d1M')\n self.assertEqual(get_video_id_from_url('https://www.youtube.com/watch?v=DqGwxR_0d1M'),\n 'DqGwxR_0d1M')", "def test_invalid_update_post_form_with_image_missing(\n self, proto_post, proto_user\n ):\n\n data = {\n \"title\": \"This is the modified title\",\n \"categories\": [51, 52],\n \"overview\": \"This is the modified overview\",\n \"content\": \"This is the modified content\",\n \"featured\": True,\n \"status\": 1,\n }\n form = EditForm(data)\n assert not form.is_valid()\n assert len(form.errors) == 1\n assert \"thumbnail\" in form.errors", "def clear_thumbnails(self):", "async def async_media_stop(hass: HomeAssistant, entity_id: str | None = None) -> None:\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n await hass.services.async_call(MP_DOMAIN, SERVICE_MEDIA_STOP, data)", "def test_embed_ok(self):\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', AUDIO_FILE)\n self.fv('minus_upload', 'id_embed_video', YOUTUBE_URL) \n self.submit200()\n self.notfind(\"Невірний\")\n self.show()\n self.find(\"youtube_video\")\n self.find(\"<object width\")\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', NOTAGS_FILE)\n self.fv('minus_upload', 'id_embed_video', YOUTUBE_EMBED) \n self.submit200()\n self.notfind(\"Невірний\")\n self.show()\n self.find(\"<object width\")", "def test_basic_functionality(self):\n self.assertEqual(mpmodels.MediaItem.objects.count(), 0)\n video = make_video(media_id='1234', title='test title')\n set_resources_and_sync([video])\n 
self.assertEqual(mpmodels.MediaItem.objects.count(), 1)\n item = mpmodels.MediaItem.objects.get(jwp__key=video.key)\n self.assertEqual(item.title, 'test title')", "def __del__(self):\n if self.video:\n self.video.release()", "def test_delete_collection_image(self):\n pass", "def test_invalid_resource(self, mock_api_handler, mock_set_and_write):\n mock_api_handler.upload_sequencing_run.side_effect = [IridaResourceError(\"\")]\n mock_set_and_write.side_effect = [True]\n\n with self.assertRaises(IridaResourceError):\n upload_helpers.upload_sequencing_run(directory_status='status',\n sequencing_run='run',\n upload_mode='mode')\n\n mock_api_handler.upload_sequencing_run.assert_called_with(directory_status='status',\n sequencing_run='run',\n upload_mode='mode',\n run_id=None)\n mock_set_and_write.assert_called_with(\"status\", DirectoryStatus.ERROR,\n \"Could not access IRIDA resource Errors: ('',)\")", "def test_upload_dropbox_videos_bad_data(logged_in_apiclient):\n client, user = logged_in_apiclient\n collection = CollectionFactory(owner=user)\n url = reverse(\"upload-videos\")\n input_data = {\n \"collection\": collection.hexkey,\n \"files\": [\n {\n \"isDir\": False,\n \"link\": \"http://foo.bar/hoo.mp4\",\n \"thumbnailLink\": \"http://foo.bar.link/hoo.mp4\",\n }\n ],\n }\n assert (\n client.post(url, input_data, format=\"json\").status_code\n == status.HTTP_400_BAD_REQUEST\n )", "def test_api_thumbnail_retrieve_from_another_video(self):\n\n thumbnail = ThumbnailFactory()\n other_video = VideoFactory(playlist=thumbnail.video.playlist)\n\n jwt_token = InstructorOrAdminLtiTokenFactory(playlist=thumbnail.video.playlist)\n\n response = self.client.get(\n self._get_url(other_video, thumbnail),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 404)", "def video_no_found(error):\n return {'message': 'video does not exist'}, 404", "def validate(source_media_info: Metadata,\n dest_media_info: Metadata) -> None:\n src_duration = max(source_media_info[VIDEO_DURATION],\n source_media_info[AUDIO_DURATION])\n dst_duration = min(dest_media_info[VIDEO_DURATION],\n dest_media_info[AUDIO_DURATION])\n if dst_duration < DURATION_DELTA * src_duration:\n # Check whether result duration corresponds to source duration\n # (damaged source files may be processed successfully but result\n # is shorter)\n raise TranscodeError(f\"incomplete file: {dst_duration}\")", "def test_youtube_keyerror(self):\n backend = self.instance('http://youtube.com/watch?id=5')\n self.assertRaises(UnknownIdException, backend.get_code)", "def clear_unique_video(self):\n self.top_unique_video_entry.delete(0, END)\n self.top_unique_video_box.delete(0, END)\n self.unique_video_found = False\n self.missing_files_label.grid_remove()", "async def _generate_thumbnails(\n self,\n server_name: Optional[str],\n media_id: str,\n file_id: str,\n media_type: str,\n url_cache: bool = False,\n ) -> Optional[dict]:\n requirements = self._get_thumbnail_requirements(media_type)\n if not requirements:\n return None\n\n input_path = await self.media_storage.ensure_media_is_in_local_cache(\n FileInfo(server_name, file_id, url_cache=url_cache)\n )\n\n try:\n thumbnailer = Thumbnailer(input_path)\n except ThumbnailError as e:\n logger.warning(\n \"Unable to generate thumbnails for remote media %s from %s of type %s: %s\",\n media_id,\n server_name,\n media_type,\n e,\n )\n return None\n\n with thumbnailer:\n m_width = thumbnailer.width\n m_height = thumbnailer.height\n\n if m_width * m_height >= self.max_image_pixels:\n 
logger.info(\n \"Image too large to thumbnail %r x %r > %r\",\n m_width,\n m_height,\n self.max_image_pixels,\n )\n return None\n\n if thumbnailer.transpose_method is not None:\n m_width, m_height = await defer_to_thread(\n self.hs.get_reactor(), thumbnailer.transpose\n )\n\n # We deduplicate the thumbnail sizes by ignoring the cropped versions if\n # they have the same dimensions of a scaled one.\n thumbnails: Dict[Tuple[int, int, str], str] = {}\n for requirement in requirements:\n if requirement.method == \"crop\":\n thumbnails.setdefault(\n (requirement.width, requirement.height, requirement.media_type),\n requirement.method,\n )\n elif requirement.method == \"scale\":\n t_width, t_height = thumbnailer.aspect(\n requirement.width, requirement.height\n )\n t_width = min(m_width, t_width)\n t_height = min(m_height, t_height)\n thumbnails[\n (t_width, t_height, requirement.media_type)\n ] = requirement.method\n\n # Now we generate the thumbnails for each dimension, store it\n for (t_width, t_height, t_type), t_method in thumbnails.items():\n # Generate the thumbnail\n if t_method == \"crop\":\n t_byte_source = await defer_to_thread(\n self.hs.get_reactor(),\n thumbnailer.crop,\n t_width,\n t_height,\n t_type,\n )\n elif t_method == \"scale\":\n t_byte_source = await defer_to_thread(\n self.hs.get_reactor(),\n thumbnailer.scale,\n t_width,\n t_height,\n t_type,\n )\n else:\n logger.error(\"Unrecognized method: %r\", t_method)\n continue\n\n if not t_byte_source:\n continue\n\n file_info = FileInfo(\n server_name=server_name,\n file_id=file_id,\n url_cache=url_cache,\n thumbnail=ThumbnailInfo(\n width=t_width,\n height=t_height,\n method=t_method,\n type=t_type,\n ),\n )\n\n with self.media_storage.store_into_file(file_info) as (\n f,\n fname,\n finish,\n ):\n try:\n await self.media_storage.write_to_file(t_byte_source, f)\n await finish()\n finally:\n t_byte_source.close()\n\n t_len = os.path.getsize(fname)\n\n # Write to database\n if server_name:\n # Multiple remote media download requests can race (when\n # using multiple media repos), so this may throw a violation\n # constraint exception. If it does we'll delete the newly\n # generated thumbnail from disk (as we're in the ctx\n # manager).\n #\n # However: we've already called `finish()` so we may have\n # also written to the storage providers. 
This is preferable\n # to the alternative where we call `finish()` *after* this,\n # where we could end up having an entry in the DB but fail\n # to write the files to the storage providers.\n try:\n await self.store.store_remote_media_thumbnail(\n server_name,\n media_id,\n file_id,\n t_width,\n t_height,\n t_type,\n t_method,\n t_len,\n )\n except Exception as e:\n thumbnail_exists = (\n await self.store.get_remote_media_thumbnail(\n server_name,\n media_id,\n t_width,\n t_height,\n t_type,\n )\n )\n if not thumbnail_exists:\n raise e\n else:\n await self.store.store_local_thumbnail(\n media_id, t_width, t_height, t_type, t_method, t_len\n )\n\n return {\"width\": m_width, \"height\": m_height}", "def clean_old_data():\n logger.info('Cleaning standalone files on disk...')\n for absolute_path in glob.glob(MEDIA_URL + '*'):\n file_name = os.path.basename(absolute_path)\n try:\n relative_path = os.path.join(AUDIOS_URL, file_name)\n audio = Audio.objects.get(filename=relative_path)\n if audio.get_type() == 'episode':\n try:\n # If there are inactive audios on its being\n for e in audio.podcast.episode_set.exclude(pk=audio.podcast.active_episode.pk):\n if not e.is_active():\n logger.info('Inactive audio found in podcast set. Erasing files.')\n e.delete_files()\n except Exception, e:\n logger.exception(e.message)\n except ObjectDoesNotExist, e:\n logger.info('A file with no audio registered in database')\n if os.path.isfile(relative_path):\n logger.info('Erasing: %s' % relative_path)\n os.remove(relative_path)\n logger.info('... Done.')", "def allow_video(self, video_id):\n video = self._video_library.get_video(video_id)\n if not self._video_library.get_video(video_id):\n print(\"Cannot remove flag from video: Video does not exist\")\n return\n if not video.flag:\n print(\"Cannot remove flag from video: Video is not flagged\")\n return\n print(f\"Successfully removed flag from video: {video.title}\")\n video.set_flag(None)", "def prepare_thumbnail_url(self, object):\n if object.media is not None:\n return os.path.join(settings.MEDIA_URL, object.media.media_thumb_file.name)\n else:\n return ''", "def allow_video(self, video_id):\n if self._video_library.get_video(video_id) is None:\n print(\"Cannot remove flag from video: Video does not exist\")\n elif not self._video_library.get_video(video_id).flagged:\n print(\"Cannot remove flag from video: Video is not flagged\")\n else:\n print(f\"Successfully removed flag from video: {self._video_library.get_video(video_id).title}\")\n self._video_library.get_video(video_id).flagged = False\n self._video_library.get_video(video_id).flag_reason = \"Not supplied\"", "def clean(self):\n if self.image:\n self.glance.images.delete(self.image['id'])\n\n if self.image_file:\n shutil.rmtree(self.download_path)", "def test_remove_category_from_asset(self):\n pass", "def test_delete_image(self):\n pass", "def test_upload_image_bad(self):\n url = image_upload_url(self.recipe.id)\n res = self.client.post(url, {'image': 'notimage', format: 'multipart'})\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_upload_image_bad_request(self):\n url = image_upload_url(self.recipe.id)\n res = self.client.post(url,{'image':'notimage'},format='multipart')\n self.assertEqual(res.status_code,status.HTTP_400_BAD_REQUEST)", "def InsertVideo(self, album_or_uri, video, filename_or_handle, content_type='image/jpeg'):\n try:\n assert(isinstance(video, VideoEntry))\n except AssertionError:\n raise GooglePhotosException({\n 'status': GPHOTOS_INVALID_ARGUMENT,\n 
'body': '`video` must be a gdata.photos.VideoEntry instance',\n 'reason': 'Found %s, not PhotoEntry' % type(video)\n })\n try:\n majtype, mintype = content_type.split('/')\n # assert(mintype in SUPPORTED_UPLOAD_TYPES)\n except (ValueError, AssertionError):\n raise GooglePhotosException({\n 'status': GPHOTOS_INVALID_CONTENT_TYPE,\n 'body': 'This is not a valid content type: %s' % content_type,\n 'reason': 'Accepted content types:'\n })\n if isinstance(filename_or_handle, (str, unicode)) and \\\n os.path.exists(filename_or_handle): # it's a file name\n mediasource = gdata.MediaSource()\n mediasource.setFile(filename_or_handle, content_type)\n elif hasattr(filename_or_handle, 'read'):# it's a file-like resource\n if hasattr(filename_or_handle, 'seek'):\n filename_or_handle.seek(0) # rewind pointer to the start of the file\n # gdata.MediaSource needs the content length, so read the whole image\n file_handle = StringIO.StringIO(filename_or_handle.read())\n name = 'image'\n if hasattr(filename_or_handle, 'name'):\n name = filename_or_handle.name\n mediasource = gdata.MediaSource(file_handle, content_type,\n content_length=file_handle.len, file_name=name)\n else: #filename_or_handle is not valid\n raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT,\n 'body':'`filename_or_handle` must be a path name or a file-like object',\n 'reason':'Found %s, not path name or object with a .read() method' % \\\n type(filename_or_handle)\n })\n\n if isinstance(album_or_uri, (str, unicode)): # it's a uri\n feed_uri = album_or_uri\n elif hasattr(album_or_uri, 'GetFeedLink'): # it's a AlbumFeed object\n feed_uri = album_or_uri.GetFeedLink().href\n\n try:\n return self.Post(video, uri=feed_uri, media_source=mediasource,\n converter=None)\n except gdata.service.RequestError as e:\n raise GooglePhotosException(e.args[0])", "def clean(self):\n self.clean_rally_conf()\n self.clean_rally_logs()\n if self.flavor_alt:\n self.orig_cloud.delete_flavor(self.flavor_alt.id)\n super().clean()", "def clean_resource(self, experiment_id):\n if not self.local_mode:\n self.resource_manager.delete_firehose_stream(experiment_id)\n\n # clean athena tables\n logger.info(f\"Deleting athena tables for '{experiment_id}'...\")\n last_join_job = JoinManager(\n join_db_client=self.join_db_client,\n experiment_id=self.experiment_id,\n join_job_id=self.last_joined_job_id,\n )\n last_join_job._delete_obs_table_if_exist()\n last_join_job._delete_rewards_table_if_exist()\n\n logger.info(f\"Deleting hosting endpoint '{experiment_id}'...\")\n self.sagemaker_session.delete_endpoint_config(experiment_id)\n self.sagemaker_session.delete_endpoint(experiment_id)", "def clean_webp_textures():\n for webp in PNG_TEXTURES['output_files']:\n if os.path.isfile(webp):\n os.remove(webp)", "def fix_related_resource_ids(report: Report | None, tmp_dir: str) -> None:\n\n if report and report.image_cached_results:\n for cached_result in report.image_cached_results:\n related_resource_id = cached_result.get(\"relatedResourceId\")\n if related_resource_id and isinstance(related_resource_id, str):\n cached_result[\"relatedResourceId\"] = related_resource_id.replace(tmp_dir, \"\", 1)", "def cleanup_resources_from_task_run(self, task_run: \"TaskRun\", server_url: str) -> None:\n pass" ]
[ "0.7774484", "0.7352817", "0.6742315", "0.6415747", "0.59018755", "0.5872863", "0.58247", "0.57007104", "0.55338883", "0.5439113", "0.54177034", "0.53636616", "0.5305156", "0.53002834", "0.52959025", "0.51120865", "0.51028275", "0.50996584", "0.5081952", "0.5054293", "0.5038761", "0.50347155", "0.50297874", "0.50283825", "0.5011697", "0.4982682", "0.49596292", "0.49486077", "0.4939692", "0.49360138", "0.49245623", "0.49227336", "0.49177003", "0.48968714", "0.48763835", "0.48684487", "0.48634115", "0.4855824", "0.48479444", "0.48407096", "0.48322463", "0.48314115", "0.48271596", "0.48268014", "0.48264354", "0.48125878", "0.48124534", "0.48009712", "0.47978055", "0.47808766", "0.47793067", "0.47775677", "0.4776597", "0.4773195", "0.4772813", "0.47621247", "0.47602674", "0.47594783", "0.47587126", "0.47549003", "0.47528", "0.47507462", "0.47497332", "0.47424668", "0.47354656", "0.47317508", "0.4729495", "0.47257686", "0.47227994", "0.47190985", "0.4716161", "0.47161382", "0.4712474", "0.47083268", "0.46991262", "0.46881095", "0.46870908", "0.4684432", "0.46815768", "0.4679848", "0.4678914", "0.46739975", "0.4672329", "0.46696886", "0.46686208", "0.46667233", "0.4665773", "0.46620488", "0.46587306", "0.46574253", "0.46496996", "0.4647027", "0.46356004", "0.46284246", "0.46283132", "0.46276277", "0.46250325", "0.46234027", "0.4615864", "0.4614213" ]
0.78584284
0
If a media resource does not encapsulate any media, cleaning it should throw an error.
def test_clean_no_image_or_youtube_id():
    resource = models.MediaResource()

    with pytest.raises(ValidationError):
        resource.clean()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _handle_removed_media(self):\r\n if self.has_media():\r\n try:\r\n image = str(self.image)\r\n os.remove(image)\r\n except OSError:\r\n raise('Failure trying to remove image from filesystem.')\r\n return True", "def test_clean_only_image(image):\n resource = models.MediaResource(image=image)\n\n resource.clean()", "def test_clean_both_image_and_youtube_id(image):\n resource = models.MediaResource(image=image, youtube_id=\"dQw4w9WgXcQ\")\n\n with pytest.raises(ValidationError):\n resource.clean()", "def _finalize(self):\n if self.url and self.url.startswith('file://'):\n self.parse_external_files(self.url[7:])\n Media._finalize(self)", "def test_clean_only_youtube_id():\n resource = models.MediaResource(youtube_id=\"dQw4w9WgXcQ\")\n\n resource.clean()", "def verify_media(self):\n self.check_dataset_duplicate_ids(self.media)", "def test_empty_media(self):\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['interface']['inputs']['files'][0]['mediaTypes'] = []\n config = copy.deepcopy(self.configuration)\n json_data = {\n 'manifest': manifest,\n 'configuration': config\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n del manifest['job']['interface']['inputs']['files'][0]['mediaTypes']\n config = copy.deepcopy(self.configuration)\n json_data = {\n 'manifest': manifest,\n 'configuration': config\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})", "def delete_media():\n return Response(\"{}\", status=200, mimetype='application/json')\n message_status = request.form[\"MessageStatus\"]\n\n if (message_status == 'delivered'):\n MessageSid = request.form[\"MessageSid\"]\n MediaSid = request.form[\"MediaUrl0\"].split('/')[-1]\n delete_media_file(MessageSid, MediaSid)", "def test_media_attribute_blows_up():\n with pytest.raises(AssertionError):\n MediaBag().media", "async def _apply_media_retention_rules(self) -> None:\n # Purge remote media\n if self._media_retention_remote_media_lifetime_ms is not None:\n # Calculate a threshold timestamp derived from the configured lifetime. 
Any\n # media that has not been accessed since this timestamp will be removed.\n remote_media_threshold_timestamp_ms = (\n self.clock.time_msec() - self._media_retention_remote_media_lifetime_ms\n )\n\n logger.info(\n \"Purging remote media last accessed before\"\n f\" {remote_media_threshold_timestamp_ms}\"\n )\n\n await self.delete_old_remote_media(\n before_ts=remote_media_threshold_timestamp_ms\n )\n\n # And now do the same for local media\n if self._media_retention_local_media_lifetime_ms is not None:\n # This works the same as the remote media threshold\n local_media_threshold_timestamp_ms = (\n self.clock.time_msec() - self._media_retention_local_media_lifetime_ms\n )\n\n logger.info(\n \"Purging local media last accessed before\"\n f\" {local_media_threshold_timestamp_ms}\"\n )\n\n await self.delete_old_local_media(\n before_ts=local_media_threshold_timestamp_ms,\n keep_profiles=True,\n delete_quarantined_media=False,\n delete_protected_media=False,\n )", "def clear_images(self):\r\n\r\n with translate_errors():\r\n self.audio.clear_pictures()\r\n self.audio.save()\r\n\r\n super().clear_images()", "def clean_old_data():\n logger.info('Cleaning standalone files on disk...')\n for absolute_path in glob.glob(MEDIA_URL + '*'):\n file_name = os.path.basename(absolute_path)\n try:\n relative_path = os.path.join(AUDIOS_URL, file_name)\n audio = Audio.objects.get(filename=relative_path)\n if audio.get_type() == 'episode':\n try:\n # If there are inactive audios on its being\n for e in audio.podcast.episode_set.exclude(pk=audio.podcast.active_episode.pk):\n if not e.is_active():\n logger.info('Inactive audio found in podcast set. Erasing files.')\n e.delete_files()\n except Exception, e:\n logger.exception(e.message)\n except ObjectDoesNotExist, e:\n logger.info('A file with no audio registered in database')\n if os.path.isfile(relative_path):\n logger.info('Erasing: %s' % relative_path)\n os.remove(relative_path)\n logger.info('... 
Done.')", "def delete(self, mediaId):\n headers = { 'Authorization' : self.client.authorization_header }\n\n response = requests.delete(\n self.client.url + '/media/' + mediaId,\n headers = headers\n )\n\n #print 'Response: ' + response.text\n return json.loads(response.text)", "def test_media_attribute_is_fine_after_being_set():\n b = MediaBag()\n b.media = None\n assert b.media is None", "def delete_media_file(MessageSid, MediaSid):\n delete_content = client.messages(MessageSid).media(MediaSid).delete()\n if (delete_content == True):\n print(\"Content deleted\", MessageSid, MediaSid)\n else:\n print(\"Content NOT deleted\")", "def cleanup_old_backups(self):\n print(\"Cleaning Old Backups for media files\")\n\n file_list = utils.get_backup_file_list(\n self.get_databasename(),\n self.get_servername(),\n 'media.tar.gz',\n self.storage\n )\n\n for backup_date, filename in file_list[0:-dbbackup_settings.CLEANUP_KEEP_MEDIA]:\n if int(backup_date.strftime(\"%d\")) != 1:\n print(\" Deleting: %s\" % filename)\n self.storage.delete_file(filename)", "def clean_error(self):\r\n return self._arm.clean_error()", "def remove_media(media, window=None, gui_instance=None):\r\n\r\n cursor = connection.cursor()\r\n\r\n if media.isnumeric(): # CLI-only: The user has attempted to delete the media file based on its ID in the database\r\n cursor.execute(\"SELECT full_path FROM media WHERE id = \" + media)\r\n\r\n full_path = cursor.fetchone()\r\n\r\n if full_path is None: # The system couldn't find the specified ID\r\n print(\"Error: The specified ID does not exist in the database.\")\r\n return\r\n\r\n # Attempting to remove the media file record from the database\r\n try:\r\n cursor.execute(\"DELETE FROM media WHERE id = \" + media) # Deleting the record from the database\r\n\r\n connection.commit() # Writing the changes to the database\r\n\r\n except Error: # Database is locked\r\n print(\"\\nError when trying to commit changes to database. Make sure another application is not using the \"\r\n \"database.\")\r\n\r\n return False\r\n\r\n cursor.close()\r\n\r\n # Attempting to re-order the keys after the deleted one\r\n if not resort_keys(media): # Fatal error: database is locked\r\n print(\"\\nERROR: DATABASE COULD NOT BE UPDATED. APPLICATION CANNOT WORK AS INTENDED. \"\r\n \"PLEASE MANUALLY REMOVE ALL MEDIA FILES FROM THE MEDIA FOLDER AND TRY ADDING THEM BACK.\")\r\n sys.exit() # Quitting; the application will malfunction until the user manually resets the media folder\r\n\r\n try:\r\n os.remove(full_path[0].replace(\"\\\\\", \"/\")) # Removes the media file from the media folder\r\n\r\n except FileNotFoundError:\r\n print(\"\\nError: Could not remove the file from the media folder: The file does not exist.\")\r\n return False\r\n\r\n except PermissionError:\r\n print(\"\\nError: Unable to remove file from the media folder. Make sure you haven't selected a \"\r\n \"write-protected folder. 
If the issue persists, try changing the media folder and manually removing\"\r\n \" the media file from the current media folder.\")\r\n return False\r\n\r\n print(\"\\nThe media file has been removed.\")\r\n\r\n else: # The user is either using the GUI or has provided the filename as parameter\r\n # Getting the full path of the file (using an app-level convention for slashes)\r\n full_path = os.path.join(media_folder, os.path.basename(media)).replace(\"\\\\\", \"/\")\r\n\r\n if path.exists(full_path): # (CLI-only) Checking if the provided filename exists\r\n\r\n # Getting the id of the media which will be removed in order to re-order the IDs of the database\r\n cursor.execute(\"SELECT id FROM media WHERE full_path = \" + \"\\\"\" + full_path + \"\\\"\")\r\n id_value = cursor.fetchone()\r\n\r\n # Attempting to remove the media file record from the database\r\n try:\r\n cursor.execute(\"DELETE FROM media WHERE full_path = \" + \"\\\"\" + full_path + \"\\\"\")\r\n\r\n connection.commit() # Writing the changes to the database\r\n\r\n except Error: # Database is locked\r\n # Application is running in GUI-mode\r\n if gui_instance is not None:\r\n messagebox.showerror(\"Database is locked\", \"Error when trying to commit changes to database. Make \"\r\n \"sure another application is not using the database.\")\r\n\r\n # Application is running in CLI or debugging mode\r\n if config_var['RUN-MODE']['run_mode'] == \"1\" or config_var['RUN-MODE']['run_mode'] == \"2\":\r\n print(\"\\nError when trying to commit changes to database. Make sure another application is not \"\r\n \"using the database.\")\r\n\r\n return False\r\n\r\n cursor.close()\r\n\r\n # Attempting to re-order the keys after the deleted one\r\n if not resort_keys(id_value[0]): # Fatal error: database is locked\r\n # Application is running in GUI-mode\r\n if gui_instance is not None:\r\n messagebox.showerror(\"Database error\", \"DATABASE COULD NOT BE UPDATED. APPLICATION CANNOT WORK AS \"\r\n \"INTENDED. PLEASE MANUALLY REMOVE ALL MEDIA FILES FROM THE MEDIA FOLDER AND \"\r\n \"TRY ADDING THEM BACK.\")\r\n # Quitting; the application will malfunction until the user manually resets the media folder\r\n sys.exit()\r\n\r\n # Application is running in CLI or debugging mode\r\n if config_var['RUN-MODE']['run_mode'] == \"1\" or config_var['RUN-MODE']['run_mode'] == \"2\":\r\n print(\"\\nERROR: DATABASE COULD NOT BE UPDATED. APPLICATION CANNOT WORK AS INTENDED. \"\r\n \"PLEASE MANUALLY REMOVE ALL MEDIA FILES FROM THE MEDIA FOLDER AND TRY ADDING THEM BACK.\")\r\n # Quitting; the application will malfunction until the user manually resets the media folder\r\n sys.exit()\r\n\r\n try:\r\n os.remove(full_path) # Removes the media file from the media folder\r\n\r\n except FileNotFoundError:\r\n # Application is running in GUI-mode\r\n if gui_instance is not None:\r\n messagebox.showerror(\"File not found\", \"The file does not exist.\")\r\n\r\n # Application is running in CLI or debugging mode\r\n if config_var['RUN-MODE']['run_mode'] == \"1\" or config_var['RUN-MODE']['run_mode'] == \"2\":\r\n print(\"\\nError: Could not remove the file from the media folder: The file does not exist.\")\r\n\r\n return False\r\n\r\n except PermissionError:\r\n # Application is running in GUI-mode\r\n if gui_instance is not None:\r\n messagebox.showerror(\"Unable to remove file\", \"Unable to remove file from the media folder. Make \"\r\n \"sure you haven't selected a write-protected folder. 
If the issue persists, \"\r\n \"try changing the media folder and manually removing the media file from the \"\r\n \"current media folder.\")\r\n\r\n # Application is running in CLI or debugging mode\r\n if config_var['RUN-MODE']['run_mode'] == \"1\" or config_var['RUN-MODE']['run_mode'] == \"2\":\r\n print(\"\\nError: Unable to remove file from the media folder. Make sure you haven't selected a \"\r\n \"write-protected folder. If the issue persists, try changing the media folder and manually \"\r\n \"removing the media file from the current media folder.\")\r\n\r\n return False\r\n\r\n if gui_instance is not None: # The method has been fired by a GUI widget\r\n window.destroy() # Closes the removal window\r\n\r\n # Reloading the media list of the root window\r\n gui_instance.library_items = []\r\n gui_instance.path_frame_parent.destroy()\r\n gui_instance.display_media()\r\n\r\n else: # The method has been fired by using CLI\r\n print(\"\\nThe media file has been removed.\")\r\n\r\n else: # (CLI-only) The user has provided an invalid filename\r\n print(\"\\nError: The specified media file does not exist.\")\r\n return False\r\n\r\n return True", "def photo_file_cleanup(sender, **kwargs):\n instance = kwargs.get('instance')\n filename = instance.path.url[1:]\n if os.path.exists(filename):\n os.remove(filename)", "def cleanupResources():\n None", "def _sanitize_resources(cls, resources):\n\n try:\n for resource in cls._loop_raw(resources):\n cls._sanitize_resource(resource)\n except (KeyError, TypeError):\n _LOGGER.debug(\"no shade data available\")\n return None", "def test_delete_collection_namespaced_image_stream(self):\n pass", "def remove_stuff_post_error(self):\n os.system('rm %s' % self.destination)", "def test_delete_namespaced_image_stream(self):\n pass", "def test_delete_res_file_deletes_mp_file_object(composite_resource_with_mp_aggregation, mock_irods):\n\n res, user = composite_resource_with_mp_aggregation\n mp_aggr = next(res.logical_files)\n assert isinstance(mp_aggr, ModelProgramLogicalFile)\n res_file = res.files.first()\n assert ModelProgramResourceFileType.objects.count() == 0\n mp_aggregation = ModelProgramLogicalFile.objects.first()\n # set the res_file as software for this aggregation\n ModelProgramResourceFileType.create(file_type='software', res_file=res_file,\n mp_metadata=mp_aggregation.metadata)\n assert ModelProgramResourceFileType.objects.count() == 1\n # delete res_file\n delete_resource_file(pk=res.short_id, filename_or_id=res_file.id, user=user)\n # mp program file type got deleted\n assert ModelProgramResourceFileType.objects.count() == 0\n assert ModelProgramLogicalFile.objects.count() == 0\n assert not res.dangling_aggregations_exist()", "def clean_resource() -> list:\n helpers.starting_clean_print(RESOURCE_NAME)\n resource_client = boto3.client(BOTO3_NAME)\n resources = get_resources(resource_client)\n terminated_items = delete_resources(resource_client, resources)\n helpers.finished_clean_print(RESOURCE_NAME, terminated_items)\n return terminated_items", "def prepare_media(self, object):\n if object.media is not None:\n #return object.media.media_file.name\n return '/api/v1/media/{0}/'.format(object.media.id)\n else:\n return ''", "def clean(self):\n if self.image:\n self.glance.images.delete(self.image['id'])\n\n if self.image_file:\n shutil.rmtree(self.download_path)", "def tearDown(self):\n if os.path.exists(settings.MEDIA_ROOT):\n shutil.rmtree(settings.MEDIA_ROOT)", "def tearDown(self):\n if os.path.exists(settings.MEDIA_ROOT):\n 
shutil.rmtree(settings.MEDIA_ROOT)", "def prune_non_ascii_media_from_db():\n\n try:\n db = database.SessionLocal()\n media_list = crud.get_all_media(db=db)\n pbar_media_list = tqdm(media_list)\n pbar_media_list.set_description('Finding non-ASCII in titles')\n\n for media in pbar_media_list:\n if media.genres.__contains__('Animation') or len(media.genres) == 0:\n\n for letter in media.original_title:\n try:\n letter.encode(encoding='utf-8').decode('ascii')\n except UnicodeDecodeError:\n crud.delete_media_by_id(db=db, media_id=media.id)\n break\n print('Media with non-ASCII titles have been pruned')\n except Exception as e:\n print(e)", "async def _try_to_clean(self, ctx: Context):\n if self.cleanup:\n try:\n await ctx.channel.delete_messages(self._messages)\n except:\n pass", "def photo_edit_file_cleanup(sender, **kwargs):\n instance = kwargs.get('instance')\n filename = instance.upload.url[1:]\n if os.path.exists(filename):\n os.remove(filename)", "def test_deletion_of_user_photo_succeeds(self):\n\t\tself.name = 'media.png'\n\t\tself.image = File(open('static/img/media.png', 'rb'))\n\t\tself.created_image = UserPhoto(image=self.image, name=self.name, created_by=self.user)\n\t\tself.created_image.save()\t\t\t\n\t\tresponse = self.client.delete('/api/modify_photo/?id={}'.format(self.created_image.id))\n\t\tself.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def clean(self):\n self.clean_rally_conf()\n rally.RallyBase.clean_rally_logs()\n if self.image_alt:\n self.cloud.delete_image(self.image_alt)\n if self.flavor_alt:\n self.orig_cloud.delete_flavor(self.flavor_alt.id)\n super().clean()", "def remove_object_from_handle(self, handle):\n the_lists = get_media_referents(handle, self.dbstate.db)\n object = self.dbstate.db.get_object_from_handle(handle)\n query = DeleteMediaQuery(self.dbstate, self.uistate, handle, the_lists)\n is_used = any(the_lists)\n return (query, is_used, object)", "def clear_renders(self, media_id, owner_username, node):\n\t\ttry:\n\t\t\tmedia_id = validation.media_id(media_id)\n\t\t\towner_username = validation.username(owner_username)\n\t\t\tvalidation.required(node, 'node')\n\t\texcept errors.ValidationError, ex:\n\t\t\treturn utils.return_deferred_error(ex.value)\n\n\t\tself.log.debug(\"Clearing renders for image [%s] in user [%s]'s account\" % (media_id, owner_username))\n\n\t\t@stack\n\t\tdef do_clear(result):\n\t\t\tif result[0] != 0:\n\t\t\t\traise errors.APIError(result[1])\n\n\t\t\tpaths = result[1]\n\t\t\tdl = []\n\t\t\tfor path in paths:\n\t\t\t\tself.log.debug(\"running delete on [%s.jpg]\" % path)\n\t\t\t\tdl.append(self._delete_binary(\"%s.jpg\" % path))\n\t\t\tdList = DeferredList(dl)\n\t\t\tdList.addCallback(lambda _: \"success\")\n\t\t\treturn dList\n\n\n\t\td = self._generate_render_paths(media_id, node, owner_username)\n\t\td.addCallback(do_clear)\n\t\td.addCallback(lambda _: (0, _))\n\t\td.addErrback(lambda _: (-1, _.getErrorMessage))\n\t\treturn d", "def clear_images(self):\r\n\r\n # audio = self.MutagenType(self['filename'])\r\n self.audio.pop(\"metadata_block_picture\", None)\r\n self.audio.pop(\"coverart\", None)\r\n self.audio.pop(\"coverartmime\", None)\r\n self.audio.save()", "def test_repair_file(self):\n\n audio_path = self.converter.audio\n self.assertTrue(audio_path.endswith('.wav'))\n # Make sure it can be loaded in moviepy\n clip = AudioFileClip(audio_path)", "def test_image_no_requiere_del_campos_description(self):\n self.image_obj.description = ''\n\n # Si no hay error, todo OK.\n self.image_obj.save()", "def 
__del__(self):\n if self.video:\n self.video.release()", "def test_delete_namespaced_image_stream_tag(self):\n pass", "def prepare_media_url(self, object):\n if object.media is not None:\n return os.path.join(settings.MEDIA_URL, object.media.media_file.name)\n else:\n return ''", "def testMediaUpload(self):\n self._testUpload(DefaultStorage(), 'media')\n self._testUpload(StaticStorage(), 'static')", "def delete_post_media(self: User, post_id: str) -> Optional[Post]:\n post = dangerously_get_post(post_id)\n if self != post.author:\n raise UnauthorizedAccess()\n\n post.media_list = []\n post.save()\n\n if exists_in_post_cache(post.id):\n # only set in post cache if it already exists\n # post cache should only have reshared posts so it should not cache any deleted post\n set_in_post_cache(post)\n\n return post", "def test_upload_area_cleanup(self):\n vis2_uvid='urn:mrn:stm:service:instance:furuno:vis2'\n p = Path('import')\n files = list(p.glob('**/urn:mrn:s124:*'))\n for item in files:\n print(item)\n os.remove(str(item))\n pass", "def __mediaPlayerStateChanged(self, state):\n if state == QMediaPlayer.StoppedState:\n self.__mediaFile.close()\n self.__mediaFile.remove()\n self.__mediaFile = None", "def test_thumbnail(self):\n pub = PublicationFactory(thumbnail__filename=\"tester.jpg\")\n self.assertEqual(\n pub.thumbnail.url, f\"/media/reading/publications/{pub.slug}/tester.jpg\"\n )\n self.assertTrue(\n pub.thumbnail.path.endswith, f\"/reading/publications/{pub.slug}/tester.jpg\"\n )\n\n # Tidy up:\n pub.thumbnail.delete()", "def test_video_delete(self):\n v1, v2 = make_video(media_id='1234'), make_video(media_id='2345')\n set_resources_and_sync([v1, v2])\n i1 = mpmodels.MediaItem.objects.get(jwp__key=v1.key)\n i2 = mpmodels.MediaItem.objects.get(jwp__key=v2.key)\n set_resources_and_sync([v1])\n self.assertIsNone(mpmodels.MediaItem.objects.get(id=i1.id).deleted_at)\n self.assertIsNotNone(mpmodels.MediaItem.objects_including_deleted.get(id=i2.id).deleted_at)\n self.assertFalse(mpmodels.MediaItem.objects.filter(id=i2.id).exists())", "def cleanup(e):\n for f in e.files:\n try:\n if os.path.isfile(f):\n os.remove(f)\n except OSError:\n continue\n\n return", "def _cleanup_resourceprovider(self):\n # Disable too broad exception warning\n # pylint: disable=W0703\n self.resourceprovider = ResourceProvider(self.args)\n try:\n self.resourceprovider.cleanup()\n self.logger.info(\"Cleanup done.\")\n except Exception as error:\n self.logger.error(\"Cleanup failed! %s\", error)", "def eject_vmedia(self, device):\n device_name = VALID_VMEDIA_DEVICES.get(device)\n if not device_name:\n raise exception.InvalidInputError(\n \"Invalid device. Valid devices: cd0 or cd1 or hd0 or hd1.\")\n vmedia_partition_id = self.get_vmedia_device_uri(device_name)\n try:\n virtual_media_object = virtual_media.VirtualMedia(\n self._sushy._conn, vmedia_partition_id)\n virtual_media_object.eject_media()\n except sushy.exceptions.SushyError as e:\n msg = (self._('The Redfish System \"%(partition_id)s\" was '\n 'not found. 
Error %(error)s') %\n {'partition_id': vmedia_partition_id, 'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)", "async def _remove_local_media_from_disk(\n self, media_ids: List[str]\n ) -> Tuple[List[str], int]:\n removed_media = []\n for media_id in media_ids:\n logger.info(\"Deleting media with ID '%s'\", media_id)\n full_path = self.filepaths.local_media_filepath(media_id)\n try:\n os.remove(full_path)\n except OSError as e:\n logger.warning(\"Failed to remove file: %r: %s\", full_path, e)\n if e.errno == errno.ENOENT:\n pass\n else:\n continue\n\n thumbnail_dir = self.filepaths.local_media_thumbnail_dir(media_id)\n shutil.rmtree(thumbnail_dir, ignore_errors=True)\n\n await self.store.delete_remote_media(self.server_name, media_id)\n\n await self.store.delete_url_cache((media_id,))\n await self.store.delete_url_cache_media((media_id,))\n\n removed_media.append(media_id)\n\n return removed_media, len(removed_media)", "def pre_provider_attachment_delete(self, resource_id):\n pass", "def _clear_audio_files(self):\n try:\n shutil.rmtree(self.audio_file_folder)\n except:\n print('Failure to clear audio files in {self.audio_file_folder}')", "def test_str_no_title(media_resource_factory):\n resource = media_resource_factory()\n\n assert str(resource) == str(resource.id)", "def test_remove_asset(self):\n # Create a story\n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. 
Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline)\n # Confirm that the story has no assets\n self.assertEqual(story.assets.count(), 0)\n # create a Section\n section = create_section(title=\"Test Section 1\", story=story)\n # create a HtmlAsset\n asset = HtmlAsset()\n asset.save()\n translation = HtmlAssetTranslation(title='Test Asset', asset=asset)\n translation.save()\n # Assign the asset to the section\n section_asset = SectionAsset(section=section, asset=asset, weight=0)\n section_asset.save()\n # Confirm the asset is in the section's list\n self.assertTrue(asset in section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(asset in story.assets.select_subclasses())\n # Delete the asset from the section.\n section_asset.delete()\n # Confirm that the asset is NOT in the section's list\n self.assertFalse(asset in section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(asset in story.assets.select_subclasses())", "def remove_media_files_path(self):\n\n if dialog.MessageDialog(programName,\n (\"Removing the path of media files from the project file is irreversible.<br>\"\n \"Are you sure to continue?\"),\n [YES, NO]) == NO:\n return\n\n self.pj = project_functions.remove_media_files_path(self.pj)\n self.projectChanged = True", "def clean(self):\n if os.path.exists(self.initial):\n if os.path.exists(self.path) and os.stat(self.path).st_size == os.stat(\n self.initial).st_size:\n os.remove(self.initial)\n else:\n # if it doesn't match, something probably crashed; rename the temporary file and\n # it'll get uploaded at some point\n self.auto_filename()\n self.rename()\n self.connect()\n os.remove(self.initial)\n if os.path.exists(self.path):\n os.remove(self.path)\n self.filename_set = False", "def cleanup():", "def _cleanup(self):\n pass", "def load_media_failed(self, item, error_code):\n _LOGGER.debug(\n \"[%s %s] Load media failed with code %s(%s) for item %s\",\n self.entity_id,\n self._cast_info.friendly_name,\n error_code,\n MEDIA_PLAYER_ERROR_CODES.get(error_code, \"unknown code\"),\n item,\n )", "def clean(self):\n self.clean_rally_conf()\n self.clean_rally_logs()\n if self.flavor_alt:\n self.orig_cloud.delete_flavor(self.flavor_alt.id)\n super().clean()", "def test_recreate_deleted_item(self):\n v1 = make_video(media_id='1234', title='testing')\n set_resources_and_sync([v1])\n i1 = mpmodels.MediaItem.objects.filter(jwp__key=v1.key).first()\n self.assertIsNotNone(i1)\n self.assertEqual(i1.title, 'testing')\n i1.delete()\n\n set_resources_and_sync([v1])\n i1 = mpmodels.MediaItem.objects.filter(jwp__key=v1.key).first()\n self.assertIsNotNone(i1)\n self.assertEqual(i1.title, 'testing')", "def cleanup(self):\n self.log.debug('upm - in upm cleanup()')\n # Add resource setup code here", "def check_media(self, media):\n path = os.path.join(settings.get_path(\"media\"), media)\n if os.path.exists(path):\n return path\n else:\n return False", "def _cleanup(self):\n # delete stdout/stderr\n if os.path.isfile(self.stdout):\n os.unlink(self.stdout)", "def handle_media( environ ):\n # TODO: implement me\n return 200, [], _html.format(\n title = 'MEDIA',\n head = '',\n body = 'MEDIA'\n )", "def remove(self, resource):\r\n self._load_resource(resource)\r\n blob_folder = self._blob_folder()\r\n if not is_folder(blob_folder):\r\n logger.warning(self._context(f\"Container does not exist\"))\r\n is_success 
= False\r\n else:\r\n logger.info(self._context(f\"Removing container\"))\r\n delete_folder(blob_folder)\r\n is_success = True\r\n\r\n self.disconnect()\r\n return is_success", "async def delete(bot, message):\n reply = message.reply_to_message\n if reply and reply.media:\n msg = await message.reply(\"Processing...⏳\", quote=True)\n else:\n await message.reply('Reply to file with /delete which you want to delete', quote=True)\n return\n\n for file_type in (\"document\", \"video\", \"audio\"):\n media = getattr(reply, file_type, None)\n if media is not None:\n break\n else:\n await msg.edit('This is not supported file format')\n return\n\n result = await Media.collection.delete_one({\n 'file_name': media.file_name,\n 'file_size': media.file_size,\n 'mime_type': media.mime_type\n })\n if result.deleted_count:\n await msg.edit('File is successfully deleted from database')\n else:\n await msg.edit('File not found in database')", "def __del__(self):\n self.video.release()", "def delete(self, med_id):\n query = medic_queries().del_med_by_id()\n\tvalue = get_service().del_content(query,[med_id])\n if value != 1:\n return jsonify(status=404)\n return jsonify(status=200)", "def cleanup(self):\n raise Exception(\"{0} type does not have cleanup implemented\".format(type(self)))", "def _final_cleanup(self):\n # Clean up and remove the temporary gisdbase\n self._cleanup()\n # Remove resource directories\n if \"error\" in self.run_state or \"terminated\" in self.run_state:\n self.storage_interface.remove_resources()", "def preview_file_cleanup(sender, **kwargs):\n\n instance = kwargs.get('instance')\n filename = instance.path.url[1:]\n if os.path.exists(filename):\n os.remove(filename)", "def cleanup(self, *args, **kwargs):", "def test_delete_stream(self):\n pass", "def remove_mediafile(mediafiles_id):\n mediafile = MediaFiles.query.get(mediafiles_id)\n db_session.delete(mediafile)\n db_session.commit()\n return 'Removed MediaFile #%s \"%s\" from database.' 
% (mediafiles_id, mediafile.path), 'success'", "def test_delete_system_asset(self):\n pass", "def api_asset_cleanup():\n app.bank.clear()\n return \"\", 200", "def test_remove_asset(self):\n # Confirm that the story has no assets\n self.assertEqual(self.story.assets.count(), 0)\n # Assign the asset to the section\n container = Container.objects.get(name='left')\n section_asset = SectionAsset(section=self.section, asset=self.asset, container=container)\n section_asset.save()\n # Confirm the asset is in the section's list\n self.assertTrue(self.asset in self.section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(self.asset in self.story.assets.select_subclasses())\n # Delete the asset from the section.\n section_asset.delete()\n # Confirm that the asset is NOT in the section's list\n self.assertFalse(self.asset in self.section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(self.asset in self.story.assets.select_subclasses())", "def cleanup(self, campaing_error):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def test_cleanup_on_failure_when_upload(self, mocker):\n remove_spy = mocker.spy(os, 'remove')\n self._retryable.side_effect = requests.HTTPError('Fail')\n\n payload = dict(id=\"B\", data={\"some\": \"data\"}, ai_service='A')\n headers = {'x-rh-identity': 'ABC'}\n self.client.post(self.url, json=payload, headers=headers)\n\n remove_spy.assert_called_once()", "def cleanup(self):", "def cleanup(self):", "def cleanup(self):", "def cleanup(self) -> None:\n raise NotImplementedError()", "def clean_before_save(self, image_only=False):\n self.image = None\n self.animated = False\n if hasattr(self, \"dict_image\"):\n # self.dict_image = None\n delattr(self, \"dict_image\")\n if hasattr(self, \"list_image\"):\n self.list_image = None\n delattr(self, \"list_image\")", "def uses_media(self):\n return True", "def cleanup(self):\n superClass.cleanup(self)\n # TODO Release resources and cleanup stuff here", "def cleanup(self):\n raise NotImplementedError", "def asset_cleanup():\n app.bank = dict()\n return \"Cleaned\", 200", "def _cleanup_ffmpeg(self) -> None:\r\n self.ffmpeg_proc.communicate()\r\n self.ffmpeg_proc = None", "def remove_thumbnail(inJSON):\n time.sleep(2)\n consoleOutput = exec_console_command(\"rm \" + inJSON + \";\" + constants.getExitStatus)\n\n if \"\\n1\" in consoleOutput:\n raise IOError(\"Thumbnail file doesn't exist to delete. No worries though, it was going to be deleted anyway!\")\n\n return 0", "def test_cannot_remove_file(self):\n self.api.remove_file('/some-fake/path/to-delete-file.json')", "def handleCleanMetadataKeep(self):\n logging.debug(\"Removing all metadata found...\")\n filePath = self.filesList.selectedItems()[0].text(2)\n self.filesList.removeAllMeta(filePath)", "def cleanup(self):\n raise NotImplementedError()", "def cleanup(self):\n raise NotImplementedError()", "def has_media(self):\r\n if self.image:\r\n return True\r\n return False", "def remove_post(self, post_id: str) -> bool:\n try:\n self.api.deleteMedia(mediaId=post_id)\n return True\n except BaseException as e:\n print('Error on data %s' % str(e))\n return False" ]
[ "0.69884086", "0.6832411", "0.6076411", "0.5952054", "0.5877735", "0.586007", "0.574167", "0.5715479", "0.56592447", "0.56336623", "0.5573302", "0.55011815", "0.5439052", "0.54167026", "0.5395553", "0.5383151", "0.53797346", "0.532659", "0.52952904", "0.5293007", "0.5271096", "0.5266999", "0.5254611", "0.52485305", "0.52427626", "0.5218256", "0.5203573", "0.5191458", "0.5164114", "0.5164114", "0.515374", "0.5147281", "0.5106598", "0.51059467", "0.5094197", "0.50904536", "0.5083901", "0.5048752", "0.5035644", "0.50254005", "0.4991476", "0.49895227", "0.49871537", "0.4972747", "0.49686947", "0.49661097", "0.49616176", "0.49439913", "0.49438807", "0.4935005", "0.4933928", "0.4932291", "0.48998785", "0.48873332", "0.48848197", "0.48844957", "0.4881508", "0.4880471", "0.4879327", "0.48762372", "0.4874569", "0.48736826", "0.48694107", "0.48654908", "0.48599386", "0.48592672", "0.48559323", "0.48503155", "0.48290762", "0.48284292", "0.48104706", "0.47887433", "0.47870946", "0.47804296", "0.47781608", "0.47765392", "0.47748277", "0.477141", "0.4771356", "0.47703615", "0.47651744", "0.4764875", "0.4757146", "0.47566524", "0.47566524", "0.47566524", "0.47566384", "0.47539458", "0.47518647", "0.47454473", "0.47405666", "0.4738986", "0.47346693", "0.47321323", "0.473178", "0.4719265", "0.4715333", "0.4715333", "0.47142613", "0.47120228" ]
0.70109206
0
Cleaning a media resource that only has an image should do nothing.
def test_clean_only_image(image): resource = models.MediaResource(image=image) resource.clean()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _handle_removed_media(self):\r\n if self.has_media():\r\n try:\r\n image = str(self.image)\r\n os.remove(image)\r\n except OSError:\r\n raise('Failure trying to remove image from filesystem.')\r\n return True", "def clean_before_save(self, image_only=False):\n self.image = None\n self.animated = False\n if hasattr(self, \"dict_image\"):\n # self.dict_image = None\n delattr(self, \"dict_image\")\n if hasattr(self, \"list_image\"):\n self.list_image = None\n delattr(self, \"list_image\")", "def test_clean_no_image_or_youtube_id():\n resource = models.MediaResource()\n\n with pytest.raises(ValidationError):\n resource.clean()", "def remove_image_file(sender, instance, **kwargs):\n # Pass false so ImageField doesn't save the model.\n instance.image.delete(False)", "def clean(self):\n if self.image:\n self.glance.images.delete(self.image['id'])\n\n if self.image_file:\n shutil.rmtree(self.download_path)", "def clear_images(self):\r\n\r\n with translate_errors():\r\n self.audio.clear_pictures()\r\n self.audio.save()\r\n\r\n super().clear_images()", "def clear_images(self):\r\n\r\n # audio = self.MutagenType(self['filename'])\r\n self.audio.pop(\"metadata_block_picture\", None)\r\n self.audio.pop(\"coverart\", None)\r\n self.audio.pop(\"coverartmime\", None)\r\n self.audio.save()", "def photo_file_cleanup(sender, **kwargs):\n instance = kwargs.get('instance')\n filename = instance.path.url[1:]\n if os.path.exists(filename):\n os.remove(filename)", "def applyMorphologicalCleaning(self, image):", "def clear_renders(self, media_id, owner_username, node):\n\t\ttry:\n\t\t\tmedia_id = validation.media_id(media_id)\n\t\t\towner_username = validation.username(owner_username)\n\t\t\tvalidation.required(node, 'node')\n\t\texcept errors.ValidationError, ex:\n\t\t\treturn utils.return_deferred_error(ex.value)\n\n\t\tself.log.debug(\"Clearing renders for image [%s] in user [%s]'s account\" % (media_id, owner_username))\n\n\t\t@stack\n\t\tdef do_clear(result):\n\t\t\tif result[0] != 0:\n\t\t\t\traise errors.APIError(result[1])\n\n\t\t\tpaths = result[1]\n\t\t\tdl = []\n\t\t\tfor path in paths:\n\t\t\t\tself.log.debug(\"running delete on [%s.jpg]\" % path)\n\t\t\t\tdl.append(self._delete_binary(\"%s.jpg\" % path))\n\t\t\tdList = DeferredList(dl)\n\t\t\tdList.addCallback(lambda _: \"success\")\n\t\t\treturn dList\n\n\n\t\td = self._generate_render_paths(media_id, node, owner_username)\n\t\td.addCallback(do_clear)\n\t\td.addCallback(lambda _: (0, _))\n\t\td.addErrback(lambda _: (-1, _.getErrorMessage))\n\t\treturn d", "def unpropagateImage(self, dryrun):\n pass", "def clearImage(self):\n if self.hasImage():\n self.scene.removeItem(self._image)\n self._image = None", "def delete_test_image(image_field):\n warnings.warn(DeprecationWarning(\n \"delete_test_image() is deprecated in favour of the \"\n \"get_sample_image() context manager.\"), stacklevel=2)\n # ensure all thumbs are deleted\n for filename in glob.glob(\n os.path.join(\n settings.MEDIA_ROOT, 'thumbs', image_field.name.split('/')[-1]\n ) + '*'\n ):\n os.unlink(filename)\n # delete the saved file\n image_field.delete()", "def test_imagefield_annotate_with_bitmap_image_after_clean(self):\n from PIL.BmpImagePlugin import BmpImageFile\n try:\n Image.register_mime(BmpImageFile.format, None)\n f = ImageField()\n img_path = get_img_path('filepath_test_files/1x1.bmp')\n with open(img_path, 'rb') as img_file:\n img_data = img_file.read()\n\n img_file = SimpleUploadedFile('1x1.bmp', img_data)\n img_file.content_type = 'text/plain'\n\n uploaded_file = 
f.clean(img_file)\n\n self.assertEqual('BMP', uploaded_file.image.format)\n self.assertIsNone(uploaded_file.content_type)\n finally:\n Image.register_mime(BmpImageFile.format, 'image/bmp')", "def get_clean_image(image):\n if not image:\n return \"\"\n if \"music@\" in image:\n # fix for embedded images\n thumbcache = xbmc.getCacheThumbName(image).replace(\".tbn\", \".jpg\")\n thumbcache = \"special://thumbnails/%s/%s\" % (thumbcache[0], thumbcache)\n if not xbmcvfs.exists(thumbcache):\n xbmcvfs.copy(image, thumbcache)\n image = thumbcache\n if image and \"image://\" in image:\n image = image.replace(\"image://\", \"\")\n image = urllib.unquote(image.encode(\"utf-8\"))\n if image.endswith(\"/\"):\n image = image[:-1]\n if not isinstance(image, unicode):\n image = image.decode(\"utf8\")\n return image", "def except_image_only(resource):\n if resource.image is None:\n raise FeatureExtractionError(resource, 400, 'Image resource is required')\n if resource.mask:\n raise FeatureExtractionError(resource, 400, 'Mask resource is not accepted')\n if resource.gobject:\n raise FeatureExtractionError(resource, 400, 'Gobject resource is not accepted')", "def photo_edit_file_cleanup(sender, **kwargs):\n instance = kwargs.get('instance')\n filename = instance.upload.url[1:]\n if os.path.exists(filename):\n os.remove(filename)", "def __check_delete_images(self):\n for i, (ctr, usingproperty) in enumerate(zip(self.__using_image_counter, self.__accessed_image)):\n if not ctr and not usingproperty:\n self.__images_cache[i] = None", "def test_clean_both_image_and_youtube_id(image):\n resource = models.MediaResource(image=image, youtube_id=\"dQw4w9WgXcQ\")\n\n with pytest.raises(ValidationError):\n resource.clean()", "def strip(self):\n result = library.MagickStripImage(self.wand)\n if not result:\n self.raise_exception()", "def clearImage(self):\n if self.hasImage():\n self.scene.removeItem(self._pixmapHandle)\n self._pixmapHandle = None\n self.zoom=-1\n self.scene.clear()", "def clean(context):\n print(f\"Attempting to forcefully remove image {IMAGE_NAME}:{IMAGE_VER}\")\n context.run(f\"docker rmi {IMAGE_NAME}:{IMAGE_VER} --force\")\n print(f\"Successfully removed image {IMAGE_NAME}:{IMAGE_VER}\")", "def _ensure_empty_image_ok(self):\n if self.ignore_empty:\n return\n\n if len(self) > 1:\n raise RuntimeError(\n \"Cannot write None image at extension %d\" % len(self))\n if 'ndims' in self[0]._info:\n raise RuntimeError(\"Can only write None images to extension zero, \"\n \"which already exists\")", "def clear_thumbnails(self):", "def test_cambia_imagen_elimina_la_antigua(self):\n self.image_path = os.path.join(os.path.dirname(__file__), 'image_for_model2.jpg')\n image_path = self.image_obj.image.path\n self.image_obj.image = simple_uploaded_file(self.image_path)\n self.image_obj.save()\n\n self.assertNotEqual(image_path, self.image_obj.image.path)\n self.assertFalse(os.path.exists(image_path))", "def test_delete_namespaced_image_stream(self):\n pass", "def check_files(self):\n for f in self.filenames:\n img = cv2.imread(f, int(self.color))\n if img is None:\n os.remove(f)", "def clean(self):\n self.clean_rally_conf()\n rally.RallyBase.clean_rally_logs()\n if self.image_alt:\n self.cloud.delete_image(self.image_alt)\n if self.flavor_alt:\n self.orig_cloud.delete_flavor(self.flavor_alt.id)\n super().clean()", "def test_delete_collection_namespaced_image_stream(self):\n pass", "def clean(self):\n tags = self.get_tags()\n for tag in tags:\n image_name = self.build_image_name(tag)\n try:\n 
self.client.images.remove(image_name, force=True)\n except Exception as ex:\n print('Cannot remove {}: {}'.format(tag, str(ex)))", "def uiClearImage(self):\n\n\t\traise foundations.exceptions.ProgrammingError(\n\t\t\"{0} | '{1}' attribute is not deletable!\".format(self.__class__.__name__, \"uiClearImage\"))", "def clean_webp_textures():\n for webp in PNG_TEXTURES['output_files']:\n if os.path.isfile(webp):\n os.remove(webp)", "def test_deletion_of_user_photo_succeeds(self):\n\t\tself.name = 'media.png'\n\t\tself.image = File(open('static/img/media.png', 'rb'))\n\t\tself.created_image = UserPhoto(image=self.image, name=self.name, created_by=self.user)\n\t\tself.created_image.save()\t\t\t\n\t\tresponse = self.client.delete('/api/modify_photo/?id={}'.format(self.created_image.id))\n\t\tself.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def test_delete_collection_image(self):\n pass", "def test_empty_media(self):\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['interface']['inputs']['files'][0]['mediaTypes'] = []\n config = copy.deepcopy(self.configuration)\n json_data = {\n 'manifest': manifest,\n 'configuration': config\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n del manifest['job']['interface']['inputs']['files'][0]['mediaTypes']\n config = copy.deepcopy(self.configuration)\n json_data = {\n 'manifest': manifest,\n 'configuration': config\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})", "def test_delete_image(self):\n pass", "def test_empty_image(self):\n r = post(self.client, 'upload.up_image_async', {'image': ''},\n args=['questions.Question', 1])\n\n eq_(400, r.status_code)\n json_r = json.loads(r.content)\n eq_('error', json_r['status'])\n eq_('Invalid or no image received.', json_r['message'])\n eq_('You have not selected an image to upload.',\n json_r['errors']['image'][0])", "def delete_image(self):\n Image.objects.get(id = self.id).delete()", "def auto_delete_image_and_thumbnail_on_delete(sender, instance, **kwargs):\n if instance.image:\n if os.path.isfile(instance.image.path):\n os.remove(instance.image.path)\n\n if instance.thumbnail:\n if os.path.isfile(instance.thumbnail.path):\n os.remove(instance.thumbnail.path)\n\n return False", "def trim(self):\n result = library.MagickTrimImage(self.wand)\n if not result:\n self.raise_exception()", "def testMissingImage(self):\n self.assertNotIn('no_image', self.data)", "def test_image_no_requiere_del_campos_description(self):\n self.image_obj.description = ''\n\n # Si no hay error, todo OK.\n self.image_obj.save()", "def _finalize(self):\n if self.url and self.url.startswith('file://'):\n self.parse_external_files(self.url[7:])\n Media._finalize(self)", "def has_media(self):\r\n if self.image:\r\n return True\r\n return False", "def 
test_delete_namespaced_image_stream_tag(self):\n pass", "def test_transform_image_no_resize_png(self):\n self.expect_open_image('SomeBlobKey', (1600, 1200), mime_type='PNG')\n self.expect_resize(blob_image._DEFAULT_SERVING_SIZE)\n self.expect_encode_image('SomeImageInPng',\n images_service_pb.OutputSettings.PNG)\n self.mox.ReplayAll()\n self.assertEquals(('SomeImageInPng', 'image/png'),\n self.app._transform_image('SomeBlobKey', ''))\n self.mox.VerifyAll()", "def raw_clean(delete, invert, raw_dir, trash, raw_ext):\n raw_image_ext = f\".{raw_ext.upper()}\"\n\n # Basic user input check\n if not os.path.exists(raw_dir):\n print(f\"No '{raw_dir}' directory found!\")\n sys.exit(1)\n\n # Get list of images in different formats\n image_dir = os.getcwd()\n raw_dir = os.path.abspath(raw_dir)\n\n jpgs = set(\n [f.split('.')[0]\n for f in os.listdir(image_dir) if f.endswith(COMP_IMAGE_EXT)]\n )\n raws = set(\n [f.split('.')[0]\n for f in os.listdir(raw_dir) if f.endswith(raw_image_ext)]\n )\n\n # Find missing pairs\n paired = raws & jpgs\n jpgs_without_raw = jpgs - paired\n raws_without_jpg = raws - paired\n\n # Decide what set of files to process\n if not invert:\n images = raws_without_jpg\n workdir = raw_dir\n ext = raw_image_ext\n else:\n images = jpgs_without_raw\n workdir = image_dir\n ext = COMP_IMAGE_EXT\n\n # Process files\n for image in images:\n image_path = os.path.join(workdir, f\"{image}{ext}\")\n if not trash and not delete:\n print(f\"No pair found for '{image_path}' \")\n elif delete:\n print(f\"Deleting '{image_path}'...\")\n os.unlink(image_path)\n elif trash:\n print(f\"Trashing '{image_path}'...\")\n send2trash.send2trash(image_path)", "def clean(img):\n\n label_img = label(img, connectivity=2)\n props = sorted(regionprops(label_img), key=lambda x: x.area)\n clean = morphology.binary_closing(img)\n\n clean = morphology.remove_small_holes(clean)\n return morphology.remove_small_objects(clean,\n int(np.floor(props[-1].area) / 10), connectivity=2)", "def delete_file(self, instance, sender, **kwargs):\n super(AutoImageField, self).delete_file(instance, sender)\n if getattr(instance, self.attname):\n # Get full path and the base directory that contains the file\n file_name = getattr(instance,self.name).name\n basedir = os.path.dirname(file_name)\n \n # Look for thumbnails created from filters for the current filename\n # and delete the files\n mask = add_to_basename(file_name, '_*')\n [os.remove(os.path.join(basedir, f)) for f in glob.glob(mask)]", "def verify_media(self):\n self.check_dataset_duplicate_ids(self.media)", "def _clear(self):\n\n self.image = Image.new(\"RGB\", (self._width, self._height), self._color)", "def __on_delete(self):\n self.image.delete()", "def __on_delete(self):\n self.image.delete()", "def delete_file(sender, instance, *args, **kwargs):\n if instance.image:\n _delete_file(instance.image.path)", "def reject(request, img_id):\n if not request.user.is_staff:\n return HttpResponseRedirect(reverse('wainz.views.composite'))\n else:\n img = Image.objects.get(id=img_id)\n os.remove(os.path.join(settings.STATIC_ROOT, \"uploaded-images\", \"%s.%s\" % (img.image_path, img.extension)))\n os.remove(os.path.join(settings.STATIC_ROOT, \"uploaded-images\", \"%s-thumb.%s\" % (img.image_path, img.extension)))\n img.delete()\n return HttpResponseRedirect(reverse('wainz.views.approve_images'))", "def remove_unactionable_images(data):\n os.makedirs(os.path.join(data, 'removed'), exist_ok=True)\n for product in os.listdir(data):\n if product.startswith('product') is False:\n 
continue\n path = os.path.join(data, product)\n if os.path.isdir(path) is False:\n continue\n if is_useful(path, 0.5) is False:\n print('\\tRemoving ' + path)\n shutil.copy(os.path.join(path, 'TCI.tiff'),\n os.path.join(data, 'removed', product + '.tiff'))\n shutil.rmtree(path)\n else:\n shutil.copy(os.path.join(path, 'TCI.tiff'),\n os.path.join(data, product + '.tiff'))", "def test_answer_meta_image_undefined(self):\n answer = Answer()\n answer.save()\n page = self.create_answer_page(answer_base=answer)\n self.assertIsNone(page.meta_image)", "def image(self):\n image = self._image\n for remove in ('oci:', 'http:', 'https:'):\n if image.startswith(remove):\n image = image.replace(remove, '')\n return image", "def test_clean_only_youtube_id():\n resource = models.MediaResource(youtube_id=\"dQw4w9WgXcQ\")\n\n resource.clean()", "def test_transform_image_no_resize_gif(self):\n self.expect_open_image('SomeBlobKey', (1600, 1200), mime_type='GIF')\n self.expect_resize(blob_image._DEFAULT_SERVING_SIZE)\n # ImageService only supports PNG/JPEG encoding, so we transcode to PNG.\n self.expect_encode_image('SomeImageInPng',\n images_service_pb.OutputSettings.PNG)\n self.mox.ReplayAll()\n self.assertEquals(('SomeImageInPng', 'image/png'),\n self.app._transform_image('SomeBlobKey', ''))\n self.mox.VerifyAll()", "def clean_old_data():\n logger.info('Cleaning standalone files on disk...')\n for absolute_path in glob.glob(MEDIA_URL + '*'):\n file_name = os.path.basename(absolute_path)\n try:\n relative_path = os.path.join(AUDIOS_URL, file_name)\n audio = Audio.objects.get(filename=relative_path)\n if audio.get_type() == 'episode':\n try:\n # If there are inactive audios on its being\n for e in audio.podcast.episode_set.exclude(pk=audio.podcast.active_episode.pk):\n if not e.is_active():\n logger.info('Inactive audio found in podcast set. Erasing files.')\n e.delete_files()\n except Exception, e:\n logger.exception(e.message)\n except ObjectDoesNotExist, e:\n logger.info('A file with no audio registered in database')\n if os.path.isfile(relative_path):\n logger.info('Erasing: %s' % relative_path)\n os.remove(relative_path)\n logger.info('... 
Done.')", "def test_thumbnail(self):\n pub = PublicationFactory(thumbnail__filename=\"tester.jpg\")\n self.assertEqual(\n pub.thumbnail.url, f\"/media/reading/publications/{pub.slug}/tester.jpg\"\n )\n self.assertTrue(\n pub.thumbnail.path.endswith, f\"/reading/publications/{pub.slug}/tester.jpg\"\n )\n\n # Tidy up:\n pub.thumbnail.delete()", "def tearDown(self):\n self.recipe.image.delete()", "def tearDown(self):\n if os.path.exists(settings.MEDIA_ROOT):\n shutil.rmtree(settings.MEDIA_ROOT)", "def tearDown(self):\n if os.path.exists(settings.MEDIA_ROOT):\n shutil.rmtree(settings.MEDIA_ROOT)", "def check_files(self):\n print('checking files')\n for f in self.filenames:\n img = cv2.imread(f, int(self.color))\n if img is None:\n os.remove(f)", "def space_cleaning():\n for file in os.listdir(\".\"):\n if file.endswith(\".png\"):\n os.remove(file)", "def update_path_image_on_remove(sender, **kwargs):\n instance = kwargs.pop('instance', None)\n action = kwargs.pop('action', None)\n pk_set = kwargs.pop('pk_set', None)\n if action == \"post_remove\" and len(instance.content.all()) != 0:\n content = Content.objects.get(pk=list(pk_set)[0])\n if instance.image == content.image or not instance.image:\n content = instance.content.all()[0]\n instance.image = content.image\n instance.save()", "def clear_thumbnail(self):\n from anima.ui import utils\n utils.clear_thumbnail(self.thumbnail_graphics_view)", "def test_image_resize_anuncio_premium(self):\n self.anuncio.is_premium = True\n self.anuncio.save()\n image_obj = self.image_model()\n image_obj.anuncio = self.anuncio\n image_obj.image = simple_uploaded_file(self.image_path)\n image_obj.save()\n\n self.assertEqual(image_obj.image.width, 1000)\n self.assertEqual(image_obj.image.height, 625)\n\n # Eliminar obj para que elimine las imágenes.\n image_obj.delete()", "def del_image(self, name):\r\n if self.images is None or name not in self.images:\r\n return\r\n l = self.images\r\n self.images = None\r\n l.setdefault('/empties/', [])\r\n # push the number on the empties list\r\n l['/empties/'].append(l[name])\r\n del l[name]\r\n self.images = l", "def delete_AllImgs(self):\n self.listImages.remove_all_imgs()", "def preview_file_cleanup(sender, **kwargs):\n\n instance = kwargs.get('instance')\n filename = instance.path.url[1:]\n if os.path.exists(filename):\n os.remove(filename)", "def no_image(cls):\n def eval_fn(p: Posting):\n if p.img_url is None:\n return f\"I couldn't find any images for this posting.\"\n\n return cls(eval_fn)", "def test_offer_image_url_extracter_no_content(self):\n result = offer_image_url_extracter(self.improper_soup)\n assert_equals(result, '')", "def auto_delete_image_lecture_on_delete(sender, instance, **kwargs):\n if instance.file:\n instance.file.delete(save=False)", "def test_cleanup(self):\n imgurl = \"{}spei03.nc\".format(self.processor.base_url)\n httpretty.register_uri(httpretty.GET, imgurl,\n body=get_mock_image())\n self.processor.download(imgurl, 'spei03.tif')\n self.assertNotEqual([], glob.glob(os.path.join(\n self.processor.tmp_dir, self.processor.prefix + '*')))\n self.processor.cleanup()\n self.assertEquals([], glob.glob(os.path.join(\n self.processor.tmp_dir, self.processor.prefix + '*')))", "def tearDown(self):\n self.image.delete()", "def auto_delete_file_on_delete(sender, instance, **kwargs):\r\n if instance.image:\r\n if pathlib.Path(instance.image.path).is_file():\r\n pathlib.Path(instance.image.path).unlink()", "def strip_exif(self,img):\n data = list(img.getdata())\n image_without_exif = 
PIL.Image.new(img.mode, img.size)\n image_without_exif.putdata(data)\n return image_without_exif", "def delete(self, *args, **kwargs):\n self.image.storage.delete(self.image.name)\n delete(self.image)\n super().delete(*args, **kwargs)", "def uiClearClickedImage(self):\n\n\t\traise foundations.exceptions.ProgrammingError(\n\t\t\"{0} | '{1}' attribute is not deletable!\".format(self.__class__.__name__, \"uiClearClickedImage\"))", "def clean():\n clean_flatbuffer_binaries()\n clean_webp_textures()", "def tearDown(self):\n Image.objects.all().delete()", "def test_img(self):\r\n self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.img_dirty), self.img_clean)", "def delete(self, *args, **kwargs):\n self.file.delete(save=False)\n self.thumbnail.delete(save=False)\n\n super(File, self).delete(*args, **kwargs)", "def clean_image(self):\n image = self.cleaned_data.get('image')\n if image and image.size > 5242880:\n raise forms.ValidationError(u'This image is too big, limit 500kb')\n return image", "def auto_delete_file_on_delete(sender, instance, **kwargs):\n if instance.img:\n if os.path.isfile(instance.img.path):\n os.remove(instance.img.path)", "def test_restrict_mediatype():\r\n counter = image_hook_counter()\r\n ADDINS = [feed_image_restrict_mediatypes(('image/png', 'image/gif')), counter]\r\n\r\n class TestFeedImage(feedev.File):\r\n content = \"\"\r\n def headers(p):\r\n if p == 1: return {'Content-Type': 'text/plain'}\r\n elif p == 2: return {'Content-Type': 'image/jpeg'}\r\n elif p == 3: return {'Content-Type': 'image/png; charset=ISO-8859-1'} # charsets are correctly parsed out\r\n elif p == 4: return {'Content-Type': 'image/png'}\r\n\r\n class TestFeed(feedev.Feed):\r\n content = FeedWithImage % (TestFeedImage.url)\r\n\r\n def pass1(feed):\r\n assert counter.success == 0\r\n def pass2(feed):\r\n assert counter.success == 0\r\n def pass3(feed):\r\n assert counter.success == 1\r\n def pass4(feed):\r\n assert counter.success == 2\r\n\r\n feedev.testcaller()", "def _sanitize_resources(cls, resources):\n\n try:\n for resource in cls._loop_raw(resources):\n cls._sanitize_resource(resource)\n except (KeyError, TypeError):\n _LOGGER.debug(\"no shade data available\")\n return None", "def cleanupResources():\n None", "def photo_delete(sender, instance, **kwargs):\n\tinstance.photo.delete(False)", "def pre_provider_attachment_delete(self, resource_id):\n pass", "def clean(self):\n if os.path.exists(self.initial):\n if os.path.exists(self.path) and os.stat(self.path).st_size == os.stat(\n self.initial).st_size:\n os.remove(self.initial)\n else:\n # if it doesn't match, something probably crashed; rename the temporary file and\n # it'll get uploaded at some point\n self.auto_filename()\n self.rename()\n self.connect()\n os.remove(self.initial)\n if os.path.exists(self.path):\n os.remove(self.path)\n self.filename_set = False", "def clean(self):\n self.clean_rally_conf()\n self.clean_rally_logs()\n if self.flavor_alt:\n self.orig_cloud.delete_flavor(self.flavor_alt.id)\n super().clean()", "def check_image_useful(image_path):\n lights_on = lights_are_on(image_path)\n if not lights_on:\n os.remove(image_path)", "def test_upload_area_cleanup(self):\n vis2_uvid='urn:mrn:stm:service:instance:furuno:vis2'\n p = Path('import')\n files = list(p.glob('**/urn:mrn:s124:*'))\n for item in files:\n print(item)\n os.remove(str(item))\n pass", "def delete(self, *args, **kwargs):\n self.image.delete()\n super(Recipe, self).delete(*args, **kwargs)", "def handleCleanMetadataKeep(self):\n logging.debug(\"Removing all 
metadata found...\")\n filePath = self.filesList.selectedItems()[0].text(2)\n self.filesList.removeAllMeta(filePath)", "def auto_delete_file_on_delete(sender, instance, **kwargs):\n if instance.img_png:\n if os.path.isfile(instance.img_png.path):\n os.remove(instance.img_png.path)" ]
[ "0.698834", "0.67045337", "0.6504589", "0.6436783", "0.6413871", "0.637336", "0.6318583", "0.61001015", "0.60031515", "0.59312874", "0.59246147", "0.5909386", "0.58954066", "0.5854419", "0.58376265", "0.58252335", "0.5816791", "0.5799392", "0.5791953", "0.5740084", "0.57199484", "0.57000625", "0.568925", "0.566456", "0.56227756", "0.5613354", "0.56076276", "0.5597479", "0.55968785", "0.55814916", "0.55540305", "0.5549161", "0.55150235", "0.5507612", "0.54870236", "0.54823375", "0.5468184", "0.5449529", "0.54335326", "0.5419008", "0.5404607", "0.5395674", "0.5379022", "0.537448", "0.53514063", "0.5344914", "0.5340434", "0.532998", "0.53246343", "0.53218687", "0.53166324", "0.53146935", "0.53146935", "0.5314023", "0.5313127", "0.5312571", "0.53042495", "0.52943754", "0.528995", "0.5289731", "0.5286331", "0.528621", "0.5274166", "0.52740455", "0.52740455", "0.5265082", "0.526485", "0.52645427", "0.52624965", "0.5253001", "0.525106", "0.52410156", "0.52185327", "0.5213675", "0.5211873", "0.5209026", "0.52054036", "0.52012473", "0.517851", "0.517248", "0.5159671", "0.5148946", "0.51415986", "0.51405793", "0.5133455", "0.5130756", "0.51293164", "0.5121345", "0.51191854", "0.5108204", "0.5102335", "0.51015663", "0.50939375", "0.5088761", "0.5084005", "0.5083168", "0.50804996", "0.50777453", "0.5075241", "0.507073" ]
0.7978231
0
Cleaning a media resource that only has a YouTube video ID should do nothing.
def test_clean_only_youtube_id(): resource = models.MediaResource(youtube_id="dQw4w9WgXcQ") resource.clean()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_clean_no_image_or_youtube_id():\n resource = models.MediaResource()\n\n with pytest.raises(ValidationError):\n resource.clean()", "def test_clean_both_image_and_youtube_id(image):\n resource = models.MediaResource(image=image, youtube_id=\"dQw4w9WgXcQ\")\n\n with pytest.raises(ValidationError):\n resource.clean()", "def clean_video_id(self):\n failed = False\n d = self.cleaned_data\n service = d.get('service')\n # Get the video id and clear whitespace on either side.\n video_id = d.get('video_id', '').strip()\n\n # Validate using YouTube's API:\n if service == 'youtube':\n url = ('http://gdata.youtube.com/feeds/api/videos/{}?alt=json'.\n format(video_id))\n data = requests.get(url)\n # Ensure we can parse the JSON data.\n try:\n json = simplejson.loads(data.text)\n # If not, mark this as a failure.\n except ValueError:\n failed = True\n\n # Validate using Vimeo's API:\n elif service == 'vimeo':\n data = requests.get('http://vimeo.com/api/v2/video/{}.json'.\n format(video_id))\n # Ensure we can parse the JSON data.\n try:\n json = simplejson.loads(data.text)\n # If not, mark this as a failure.\n except ValueError:\n failed = True\n\n # Respond based on the outcome.\n if failed:\n message = _(\"Couldn't validate video id using {} API. Please \"\n \"verify it exists and check for \"\n \"typos.\".format(service))\n raise forms.ValidationError(message)\n\n return video_id", "def test_video_delete(self):\n v1, v2 = make_video(media_id='1234'), make_video(media_id='2345')\n set_resources_and_sync([v1, v2])\n i1 = mpmodels.MediaItem.objects.get(jwp__key=v1.key)\n i2 = mpmodels.MediaItem.objects.get(jwp__key=v2.key)\n set_resources_and_sync([v1])\n self.assertIsNone(mpmodels.MediaItem.objects.get(id=i1.id).deleted_at)\n self.assertIsNotNone(mpmodels.MediaItem.objects_including_deleted.get(id=i2.id).deleted_at)\n self.assertFalse(mpmodels.MediaItem.objects.filter(id=i2.id).exists())", "def test_type_youtube():\n resource = models.MediaResource(youtube_id=\"dQw4w9WgXcQ\")\n\n assert resource.type == models.MediaResource.TYPE_YOUTUBE", "def test_video_removal(self):\n edx_video_id = 'test1'\n remove_url = self.get_url_for_course_key(self.course.id, {'edx_video_id': edx_video_id})\n response = self.client.delete(remove_url, HTTP_ACCEPT=\"application/json\")\n self.assertEqual(response.status_code, 204)\n\n self._assert_video_removal(self.url, edx_video_id, 1)", "def play_youtube(self, media_id):\n pass", "def __trim_youtube_link(link: str):\n if \"t=\" in link:\n # chop off \"&t=\", \"?t=\" or \"#t=\"\n link = link.split('t=')[0][:-1]\n return link", "def verify_media(self):\n self.check_dataset_duplicate_ids(self.media)", "def test_clean_only_image(image):\n resource = models.MediaResource(image=image)\n\n resource.clean()", "def clear_unique_video(self):\n self.top_unique_video_entry.delete(0, END)\n self.top_unique_video_box.delete(0, END)\n self.unique_video_found = False\n self.missing_files_label.grid_remove()", "def play_youtube(self, media_id):\n raise NotImplementedError()", "def test_parse_youtube_empty(self):\r\n self.assertEqual(VideoDescriptor._parse_youtube(''),\r\n {'0.75': '',\r\n '1.00': '',\r\n '1.25': '',\r\n '1.50': ''})", "def delete_video(self, video_ID): # WORKS\n try:\n self.cur.execute(\"DELETE FROM videos WHERE video_ID = \\\"{}\\\"\".format(video_ID))\n self.db.commit()\n os.remove('static/videos/' + str(video_ID) + '.mp4')\n os.remove('static/images/' + str(video_ID) + '.jpg')\n except:\n self.db.rollback()", "def test_parse_youtube_empty(self):\r\n 
self.assertEqual(\r\n VideoDescriptor._parse_youtube(''),\r\n {'0.75': '',\r\n '1.00': '',\r\n '1.25': '',\r\n '1.50': ''}\r\n )", "def test_parse_youtube_invalid(self):\r\n\r\n # invalid id\r\n youtube_str = 'thisisaninvalidid'\r\n output = VideoDescriptor._parse_youtube(youtube_str)\r\n self.assertEqual(output, {'0.75': '',\r\n '1.00': '',\r\n '1.25': '',\r\n '1.50': ''})\r\n # another invalid id\r\n youtube_str = ',::,:,,'\r\n output = VideoDescriptor._parse_youtube(youtube_str)\r\n self.assertEqual(output, {'0.75': '',\r\n '1.00': '',\r\n '1.25': '',\r\n '1.50': ''})\r\n\r\n # and another one, partially invalid\r\n youtube_str = '0.75_BAD!!!,1.0:AXdE34_U,1.25:KLHF9K_Y,1.5:VO3SxfeD,'\r\n output = VideoDescriptor._parse_youtube(youtube_str)\r\n self.assertEqual(output, {'0.75': '',\r\n '1.00': 'AXdE34_U',\r\n '1.25': 'KLHF9K_Y',\r\n '1.50': 'VO3SxfeD'})", "def validate_youtube(fragment):\n request=urllib.urlopen('https://www.youtube.com/watch?v=' + fragment)\n return request.getcode() == 200", "def validate_song(song):\n attrs = [\"default_arrangement\", \"composer\", \"copyright\", \"youtube\", \"ccli\"]\n for a in attrs:\n if getattr(song, a) in [None, \"None\"]:\n setattr(song, a, \"\")\n return song", "def allow_video(self, video_id):\n video = self._video_library.get_video(video_id)\n if not self._video_library.get_video(video_id):\n print(\"Cannot remove flag from video: Video does not exist\")\n return\n if not video.flag:\n print(\"Cannot remove flag from video: Video is not flagged\")\n return\n print(f\"Successfully removed flag from video: {video.title}\")\n video.set_flag(None)", "def test_only_sms_created(self):\n v1, v2 = make_video(media_id='1234'), make_video()\n set_resources_and_sync([v1, v2])\n i1 = mpmodels.MediaItem.objects.filter(jwp__key=v1.key).first()\n self.assertIsNotNone(i1)\n i2 = mpmodels.MediaItem.objects.filter(jwp__key=v2.key).first()\n self.assertIsNone(i2)", "def __ext_embed_id(self, youtube_url):\n youtube_id_match = re.search(r'(?<=v=)[^&#]+', youtube_url)\n youtube_id_match = youtube_id_match or re.search(\n r'(?<=be/)[^&#]+', youtube_url)\n trailer_youtube_id = (youtube_id_match.group(0) if youtube_id_match\n else None)\n return trailer_youtube_id", "def test_empty_media(self):\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['interface']['inputs']['files'][0]['mediaTypes'] = []\n config = copy.deepcopy(self.configuration)\n json_data = {\n 'manifest': manifest,\n 'configuration': config\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n del manifest['job']['interface']['inputs']['files'][0]['mediaTypes']\n config = copy.deepcopy(self.configuration)\n json_data = {\n 'manifest': manifest,\n 'configuration': config\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})", "def delete(self, mediaId):\n 
headers = { 'Authorization' : self.client.authorization_header }\n\n response = requests.delete(\n self.client.url + '/media/' + mediaId,\n headers = headers\n )\n\n #print 'Response: ' + response.text\n return json.loads(response.text)", "def _get_video_data(youtube_data, playlist=None):\n def get_category(categories):\n for category in categories:\n if category['scheme'].endswith('categories.cat'):\n return category['$t'] # TODO: map category\n media = youtube_data['media$group']\n video = Video(\n source_videoid=media['yt$videoid']['$t'],\n source_listid=playlist,\n source_username=media['media$credit'][0]['$t'],\n date_published=_parse_datetime(youtube_data['published']['$t']),\n title=youtube_data['title']['$t'],\n duration=int(media['yt$duration']['seconds']) if 'yt$duration' in media else -1,\n )\n video.source_category = get_category(media.get('media$category', []))\n video.source_view_count = int(youtube_data['yt$statistics']['viewCount']) if 'yt$statistics' in youtube_data else -1\n video.source_date_uploaded = media['yt$uploaded']['$t']\n access_control = dict(\n (i['action'], i['permission'] == 'allowed')\n for i in youtube_data.get('yt$accessControl', []))\n video.restricted = access_control.get('embed') is False\n if 'app$control' in youtube_data:\n if 'yt$incomplete' in youtube_data['app$control']:\n video.restricted = True\n else:\n state = youtube_data['app$control']['yt$state']\n if state['name'] == 'restricted':\n if state['reasonCode'] == 'limitedSyndication':\n # see https://groups.google.com/d/msg/youtube-api-gdata/on504fCOEk0/oErUbCptWu4J\n video.restricted = not any(c.get('yt$format') == 5 for c in\n media.get('media$content', []))\n else:\n video.restricted = True\n for thumbnail in media.get('media$thumbnail', []):\n if 'time' not in thumbnail:\n video.thumbnails.append(\n VideoThumbnail(\n url=thumbnail['url'],\n width=thumbnail['width'],\n height=thumbnail['height']))\n for restriction in media.get('media$restriction', []):\n if restriction['type'] == 'country':\n video.restrictions.extend(\n VideoRestriction(\n relationship=restriction['relationship'],\n country=country) for country in restriction['$t'].split())\n return video", "def clear_subs_content(self):\r\n for youtube_id in self.get_youtube_ids().values():\r\n filename = 'subs_{0}.srt.sjson'.format(youtube_id)\r\n content_location = StaticContent.compute_location(self.course.id, filename)\r\n try:\r\n content = contentstore().find(content_location)\r\n contentstore().delete(content.get_id())\r\n except NotFoundError:\r\n pass", "def test_recreate_deleted_item(self):\n v1 = make_video(media_id='1234', title='testing')\n set_resources_and_sync([v1])\n i1 = mpmodels.MediaItem.objects.filter(jwp__key=v1.key).first()\n self.assertIsNotNone(i1)\n self.assertEqual(i1.title, 'testing')\n i1.delete()\n\n set_resources_and_sync([v1])\n i1 = mpmodels.MediaItem.objects.filter(jwp__key=v1.key).first()\n self.assertIsNotNone(i1)\n self.assertEqual(i1.title, 'testing')", "def clean():\n\n tracks = []\n removed_playlists = 0\n for playlist in PlaylistManager.find():\n\n if len(playlist.tracks) == 0:\n PlaylistManager.remove(playlist.id)\n removed_playlists += 1\n else:\n tracks += playlist.tracks\n\n tracks = list(set(tracks))\n removed_tracks = 0\n for track in TrackManager.find():\n if track.id not in tracks:\n TrackManager.remove(track.id)\n removed_tracks += 1\n\n click.secho(\"Cleanup removed:\", bold=True)\n click.secho(\n tabulate( # type: ignore\n [\n (magenta(\"Tracks:\"), removed_tracks),\n 
(magenta(\"Playlists:\"), removed_playlists),\n ],\n tablefmt=\"plain\",\n colalign=(\"right\", \"left\"),\n )\n )", "def test_api_video_delete_by_playlist_admin(self):\n user = factories.UserFactory()\n playlist = factories.PlaylistFactory()\n factories.PlaylistAccessFactory(\n role=models.ADMINISTRATOR, playlist=playlist, user=user\n )\n video = factories.VideoFactory(playlist=playlist)\n\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(user.id)\n jwt_token.payload[\"user\"] = {\n \"id\": str(user.id),\n \"username\": user.username,\n }\n\n self.assertEqual(models.Video.objects.count(), 1)\n\n response = self.client.delete(\n f\"/api/videos/{video.id}/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(models.Video.objects.count(), 0)\n self.assertEqual(response.status_code, 204)", "def __del__(self):\n if self.video:\n self.video.release()", "def _render_no_tracking(self, video_id):\n you_tube_url = (\n 'https://www.youtube.com/embed/%s'\n '?feature=player_embedded&amp;rel=0') % video_id\n iframe = cElementTree.XML(\"\"\"\n<div class=\"gcb-video-container\">\n <iframe class=\"youtube-player\" title=\"YouTube Video Player\"\n type=\"text/html\" frameborder=\"0\" allowfullscreen=\"allowfullscreen\">\n </iframe>\n</div>\"\"\")\n iframe[0].set('src', you_tube_url)\n return iframe", "def test_delete_with_bad_id(self):\n resp = self.api_client.delete('/api/metadata/tracks/100000/')\n data = json.loads(resp.content)\n\n # Ensure the request filed with a 404, and an error message is returned\n self.assertEqual(resp.status_code, 404)\n self.assertEqual(data['detail'], u'The record could not be found.')", "def remove_from_playlist(self, playlist_name, video_id):\n if playlist_name.lower() in self.playlists:\n for i in self.playlists[playlist_name.lower()]:\n videos = self._video_library.get_all_videos()\n templist = []\n\n def converttostr(input_seq, seperator):\n # Join all the strings in list\n final_str = seperator.join(input_seq)\n return final_str\n\n for vid in videos:\n if i == vid.video_id:\n\n temptitle=vid.title\n print(f\"Removed video from {playlist_name}: {temptitle}\")\n self.playlists[playlist_name.lower()].remove(video_id)\n\n\n if playlist_name not in self.playlists:\n print(f\"Cannot remove video from {playlist_name}: Playlist does not exist\")\n elif video_id not in self.playlists[playlist_name.lower()]:\n print(\"Cannot remove video from my_playlist: Video does not exist\")\n #self.playlists[playlist_name.lower()].remove(video_id)\n\n\n\n #print(\"remove_from_playlist needs implementation\")", "def filter_yt(info: interceptor.Request):\n\turl = info.request_url\n\tif (url.host() == 'www.youtube.com' and\n\t\t\turl.path() == '/get_video_info' and\n\t\t\t'&adformat=' in url.query()):\n\t\tinfo.block()", "def test_metadata_not_persistence(self):\r\n self.assertIn('html5_sources', own_metadata(self.video_descriptor))\r\n attrs_to_strip = {\r\n 'show_captions',\r\n 'youtube_id_1_0',\r\n 'youtube_id_0_75',\r\n 'youtube_id_1_25',\r\n 'youtube_id_1_5',\r\n 'start_time',\r\n 'end_time',\r\n 'source',\r\n 'html5_sources',\r\n 'track'\r\n }\r\n\r\n location = self.video_descriptor.location\r\n\r\n for field_name in attrs_to_strip:\r\n delattr(self.video_descriptor, field_name)\r\n\r\n self.assertNotIn('html5_sources', own_metadata(self.video_descriptor))\r\n get_modulestore(location).update_item(self.video_descriptor, '**replace_user**')\r\n module = get_modulestore(location).get_item(location)\r\n\r\n self.assertNotIn('html5_sources', 
own_metadata(module))", "def compose_embed_youtube(video_id = None):\n assert(video_id != None)\n return \"http://www.youtube.com/embed/{0}?enablejsapi=1&wmode=opaque\".format(\n video_id\n )", "def allow_video(self, video_id):\n if self._video_library.get_video(video_id) is None:\n print(\"Cannot remove flag from video: Video does not exist\")\n elif not self._video_library.get_video(video_id).flagged:\n print(\"Cannot remove flag from video: Video is not flagged\")\n else:\n print(f\"Successfully removed flag from video: {self._video_library.get_video(video_id).title}\")\n self._video_library.get_video(video_id).flagged = False\n self._video_library.get_video(video_id).flag_reason = \"Not supplied\"", "async def _clear_player(self, ctx: Context):\n\n await self.config.guild(ctx.guild).player_id.clear()\n\n await ctx.message.add_reaction(CHECK_MARK)", "def get_embed_youtube(link = None):\n assert(link != None)\n assert(link != \"\")\n log.debug( \"preparsed link: \" + link)\n video_id = \"\"\n try:\n # break the link\n choppedLink = link.split(\"/\")\n if choppedLink[2].find(\"youtu.be\") >= 0:\n # Parse short link getting only last piece\n video_id = get_id_shortlink(choppedLink)\n elif choppedLink[3].find(\"attribution_link\") >= 0 :\n # Its an attribution link, a bit special\n video_id = get_id_attribution(choppedLink)\n else:\n # This should be a regular link\n video_id = get_id_regular_link(choppedLink)\n\n # and finally compose the embed link\n flink = compose_embed_youtube(video_id)\n log.debug( \"compound link: \" + flink)\n except Exception as e:\n log.error(\"Something weird happened when ending getting embed youtube\")\n log.exception(e)\n raise NotImplementedError( \"We are still working on links like \" + link)\n\n return flink", "def isYouTube(self):\n if 'youtube' in self.link.split('.'):\n return True\n return None", "def clear_playlist(self, playlist_name):\n playlist_id = playlist_name.lower()\n if not playlist_id in self.playlists.keys():\n print(f\"Cannot clear playlist {playlist_name}: Playlist does not exist\")\n return\n\n self.playlists.get(playlist_id).videos = []\n print(f\"Successfully removed all videos from {playlist_name}\")", "def test_get_video_id_from_url(self):\n self.assertEqual(get_video_id_from_url('https://www.youtube.com/embed/DqGwxR_0d1M'),\n 'DqGwxR_0d1M')\n self.assertEqual(get_video_id_from_url('https://youtu.be/DqGwxR_0d1M'), 'DqGwxR_0d1M')\n self.assertEqual(\n get_video_id_from_url('https://www.youtube.com/watch?v=DqGwxR_0d1M&feature=youtu.be'),\n 'DqGwxR_0d1M')\n self.assertEqual(get_video_id_from_url('https://www.youtube.com/watch?v=DqGwxR_0d1M'),\n 'DqGwxR_0d1M')", "def remove_from_playlist(self, playlist_name, video_id):\n videos = self._video_library.get_all_videos()\n video_id_list = []\n for vid in videos:\n video_id_list.append(vid.video_id)\n if video_id == vid.video_id:\n video = vid\n if playlist_name.upper() in self.playlist.keys() and video.title in self.playlist[playlist_name.upper()]:\n self.playlist[playlist_name.upper()].remove(video.title)\n print(f\"Removed video from {playlist_name}: {video.title}\")\n elif playlist_name.upper() in self.playlist.keys() and video.title not in self.playlist[playlist_name.upper()]:\n print(f\"Cannot remove video from {playlist_name}: Video is not in playlist\")\n if playlist_name.upper() not in self.playlist.keys():\n print(f\"Cannot remove video from {playlist_name}: Playlist does not exist\")\n if video_id not in video_id_list:\n print(f\"Cannot remove video from {playlist_name}: Video does 
not exist\")", "def test_str_no_title(media_resource_factory):\n resource = media_resource_factory()\n\n assert str(resource) == str(resource.id)", "def clear_video(self):\n self.video_file = None\n self.video_parser = None\n\n self.video_box.delete(0, END)", "def test_basic_functionality(self):\n self.assertEqual(mpmodels.MediaItem.objects.count(), 0)\n video = make_video(media_id='1234', title='test title')\n set_resources_and_sync([video])\n self.assertEqual(mpmodels.MediaItem.objects.count(), 1)\n item = mpmodels.MediaItem.objects.get(jwp__key=video.key)\n self.assertEqual(item.title, 'test title')", "def get_video_id(self):\n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n p = urlparse.urlparse(self.youtube_fix_url(self.original_url))\n if p.path == '/watch':\n # Url of type http://www.youtube.com/watch?v=KRaeHxwZvms&feature=g-u-u&context=G2b00124FUAAAAAAAAAA\n #logger.debug('is a watch')\n params = cgi.parse_qs(p.query)\n if 'v' in params:\n return params['v'][0]\n elif p.fragment.startswith('/watch?v='):\n # sample. http://m.youtube.com/#/watch?v=ZXkW1-HdRC8\n params = cgi.parse_qs(p.fragment)\n if '/watch?v' in params:\n return params['/watch?v'][0]\n elif p.path.startswith('/v/') or p.path.startswith('/embed/'):\n path = p.path.split('/')\n return path[-1]\n elif p.netloc == 'youtu.be':\n return p.path[1:]\n elif re.match('(.{1}/){3}([\\w+-_^/]+)', p.fragment):\n parts = p.fragment.split('/')\n return parts[-1]\n return ''", "def get_video_id(self):\n \n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n #logger.debug('DAILYMOTION VIDEO FOUND %s' % url)\n \n p = urlparse.urlparse(self.original_url)\n path = p.path\n if path.endswith('/'):\n path = path[:-1]\n path_list = path[1:].split('/')\n \n if len(path_list) == 3 and (p.path.startswith('/embed/video/') or p.path.startswith('/swf/video/')):\n # http://www.dailymotion.com/embed/video/xmp7zw\n return re.sub('_.+', '', path_list[2])\n elif len(path_list) == 2 and (p.path.startswith('/video/') or p.path.startswith('/swf/')):\n # http://www.dailymotion.com/video/xmp7zw_whatever\n # http://www.dailymotion.com/swf/xmp7zw\n return re.sub('_.+', '', path_list[1])\n \n return ''", "async def _clear(self, ctx):\n try:\n a = discord.Streaming\n p = ctx.bot.config[\"prefix\"]\n g = a(\n name=f\"{p}help | v{ctx.bot.version}\", url=\"https://twitch.tv/monstercat\"\n )\n await self.bot.change_presence(activity=g)\n except Exception:\n await ctx.send(f\"```\\n{traceback.format_exc()}```\")\n else:\n await ctx.send(\":white_check_mark: Cleared.\")", "def prune_non_ascii_media_from_db():\n\n try:\n db = database.SessionLocal()\n media_list = crud.get_all_media(db=db)\n pbar_media_list = tqdm(media_list)\n pbar_media_list.set_description('Finding non-ASCII in titles')\n\n for media in pbar_media_list:\n if media.genres.__contains__('Animation') or len(media.genres) == 0:\n\n for letter in media.original_title:\n try:\n letter.encode(encoding='utf-8').decode('ascii')\n except UnicodeDecodeError:\n crud.delete_media_by_id(db=db, media_id=media.id)\n break\n print('Media with non-ASCII titles have been pruned')\n except Exception as e:\n print(e)", "def test_api_video_create_for_nonexistent_playlist(self):\n user = factories.UserFactory()\n some_uuid = uuid.uuid4()\n\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(user.id)\n jwt_token.payload[\"user\"] = {\n \"id\": str(user.id),\n \"username\": user.username,\n }\n 
self.assertEqual(models.Video.objects.count(), 0)\n\n response = self.client.post(\n \"/api/videos/\",\n {\"lti_id\": \"video_one\", \"playlist\": some_uuid, \"title\": \"Some video\"},\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(models.Video.objects.count(), 0)\n self.assertEqual(response.status_code, 403)", "async def async_media_stop(hass: HomeAssistant, entity_id: str | None = None) -> None:\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n await hass.services.async_call(MP_DOMAIN, SERVICE_MEDIA_STOP, data)", "def delete_music():\n track_id = request.vars.track_id\n if track_id is None:\n raise HTTP(500)\n db(db.track_data.track_id == track_id).delete()\n return \"ok\"", "def test_video_removal_multiple_courses(self):\n # remove video from course1\n edx_video_id = 'test1'\n remove_url = self.get_url_for_course_key(self.course.id, {'edx_video_id': edx_video_id})\n response = self.client.delete(remove_url, HTTP_ACCEPT=\"application/json\")\n self.assertEqual(response.status_code, 204)\n\n # verify that video is only deleted from course1 only\n self._assert_video_removal(self.url, edx_video_id, 1)\n self._assert_video_removal(self.get_url_for_course_key(self.course2.id), edx_video_id, 0)", "def __del__(self):\n self.video.release()", "def clear_playlist(self, playlist_name):\n if playlist_name.upper() in self.playlist.keys():\n self.playlist[playlist_name.upper()].clear()\n print(f\"Successfully removed all videos from {playlist_name}\")\n else:\n print(f\"Cannot clear playlist {playlist_name}: Playlist does not exist\")", "def test_task_video_download(url_to_video: str, empty_video_resource: VideoResource):\n download_video(url_to_video, empty_video_resource.id)\n empty_video_resource.refresh_from_db()\n video_instance = empty_video_resource.videos.filter(primary=True).first()\n\n assert empty_video_resource.videos.all()\n assert video_instance.extension == 'mp4'\n assert video_instance.primary\n for item in video_instance.video.open():\n assert item", "def test_embed_ok(self):\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', AUDIO_FILE)\n self.fv('minus_upload', 'id_embed_video', YOUTUBE_URL) \n self.submit200()\n self.notfind(\"Невірний\")\n self.show()\n self.find(\"youtube_video\")\n self.find(\"<object width\")\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', NOTAGS_FILE)\n self.fv('minus_upload', 'id_embed_video', YOUTUBE_EMBED) \n self.submit200()\n self.notfind(\"Невірний\")\n self.show()\n self.find(\"<object width\")", "def shouldSkipUrl(self, url, data):\n return url in (\n # video\n self.stripUrl % '1880',\n self.stripUrl % '1669',\n )", "def remove_from_playlist(self, playlist_name, video_id):\n for i in playlist_name:\n if i.video_id == video_id:\n print(f\"Removed video from {playlist_name}: {self.title}\")", "def delete(self, request, *args, **kwargs):\n clip = self.get_object()\n clips_count = clip.video.clips.all().count()\n if clips_count <= 1:\n return Response(\n {'detail': \"You can't delete this video's only clip.\"}, \n status=status.HTTP_403_FORBIDDEN)\n \n else:\n self.perform_destroy(clip)\n return Response(status=status.HTTP_204_NO_CONTENT)", "def test_parse_youtube_one_video(self):\r\n youtube_str = '0.75:jNCf2gIqpeE'\r\n output = VideoDescriptor._parse_youtube(youtube_str)\r\n self.assertEqual(output, {'0.75': 'jNCf2gIqpeE',\r\n '1.00': '',\r\n '1.25': '',\r\n '1.50': ''})", "def test_parse_youtube_one_video(self):\r\n youtube_str = '0.75:jNCf2gIqpeE'\r\n output = 
VideoDescriptor._parse_youtube(youtube_str)\r\n self.assertEqual(output, {'0.75': 'jNCf2gIqpeE',\r\n '1.00': '',\r\n '1.25': '',\r\n '1.50': ''})", "def removeMovie(tweet, title, encod='utf-8'):\n if not isinstance(tweet, unicode):\n tweet = unicode(tweet, encod,'replace')\n if not isinstance(title, unicode):\n title = unicode(title, encod,'replace')\n # Convert to lower case \n tweet = withoutAccents(tweet.lower())\n title = withoutAccents(title.lower())\n # Remove movie title\n tweet = re.sub(title,'',tweet) \n \n return tweet", "def get_resource_id(self, obj):\n return obj.video.id", "def remove_from_playlist(self, playlist_name, video_id):\n if playlist_name.lower() not in self.playlists:\n print(\"Cannot remove video from\", playlist_name, end=\"\")\n print(\": Playlist does not exist\")\n elif self._video_library.get_video(video_id) is None:\n print(\"Cannot remove video from\", playlist_name, end=\"\") \n print(\": Video does not exist\") \n elif self._video_library.get_video(video_id) not in self.playlists[playlist_name.lower()]:\n print(\"Cannot remove video from\", playlist_name, end=\"\")\n print(\": Video is not in playlist \")\n else:\n print(\"Removed video from\", playlist_name, end=\"\")\n print(\":\", self._video_library.get_video(video_id).title)\n self.playlists[playlist_name.lower()].remove(self._video_library.get_video(video_id))", "def _assert_video_removal(self, url, edx_video_id, deleted_videos):\n response = self.client.get_json(url)\n self.assertEqual(response.status_code, 200)\n response_videos = json.loads(response.content.decode('utf-8'))[\"videos\"]\n self.assertEqual(len(response_videos), len(self.previous_uploads) - deleted_videos)\n\n if deleted_videos:\n self.assertNotIn(edx_video_id, [video.get('edx_video_id') for video in response_videos])\n else:\n self.assertIn(edx_video_id, [video.get('edx_video_id') for video in response_videos])", "def _handle_removed_media(self):\r\n if self.has_media():\r\n try:\r\n image = str(self.image)\r\n os.remove(image)\r\n except OSError:\r\n raise('Failure trying to remove image from filesystem.')\r\n return True", "def movie_stop(clear=True, only_fullscreen=False):\n\n if (not fullscreen) and only_fullscreen:\n return\n\n renpy.audio.music.stop(channel='movie')", "def google_youtube_check(id):\n\tif not API_KEY:\n\t\traise ConfigException(\"Require API_KEY for googleapi. 
Reload after setting.\")\n\td = {\"id\" : quote(id.encode(\"utf-8\")), \"part\" : \"id,status\", \"key\" : API_KEY}\n\t\n\tf = urlopen(YOUTUBE_INFO_URL % (urlencode(d)))\n\tytdata = load(f)\n\tif not ytdata.get(\"items\"): # if there are no items for the ID search, return False\n\t\treturn False\n\treturn True", "def test_api_video_delete_detail_anonymous(self):\n video = factories.VideoFactory()\n response = self.client.delete(f\"/api/videos/{video.id}/\")\n self.assertEqual(response.status_code, 401)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"Authentication credentials were not provided.\"}\n )\n self.assertTrue(models.Video.objects.filter(id=video.id).exists())", "def users_video_delete(self):\n user_email = request.args.get('email')\n video_title = request.args.get('video_title')\n email_token = auth.current_user()[0]\n if not video_title or not user_email:\n self.logger.debug((messages.MISSING_FIELDS_ERROR % \"video_title or user_email\"))\n return messages.ERROR_JSON % \"video_title or user_email\", 400\n if user_email != email_token and not self.auth_server.profile_query(email_token)[\"admin\"]:\n self.logger.debug(messages.USER_NOT_AUTHORIZED_ERROR)\n return messages.ERROR_JSON % messages.USER_NOT_AUTHORIZED_ERROR, 403\n try:\n self.media_server.delete_video(user_email, video_title)\n except UnexistentVideoError:\n self.logger.debug((messages.UNEXISTENT_VIDEO_ERROR % (video_title, email_token)))\n return messages.UNEXISTENT_VIDEO_ERROR % (video_title, email_token), 404\n self.video_database.delete_video(user_email, video_title)\n return messages.SUCCESS_JSON, 200", "def _clean_id(self, dirty_id):\n return self.wsid_regex.sub(\"\", dirty_id.replace(\" \", \"_\"))", "def clear_playlist(self, playlist_name):\n if playlist_name.lower() in self.playlists:\n self.playlists[playlist_name.lower()] = []\n print(f'Successfully removed all videos from {playlist_name}')\n else:\n print(f\"Cannot clear playlist {playlist_name}: Playlist does not exist\")", "def stop_video(self):\n if self.now_playing_videoid:\n # remove the current video id from the record\n video_playing = self._video_library.get_video(self.now_playing_videoid)\n print(f\"Stopping video: {video_playing.title}\")\n self.now_playing_videoid = ''\n self.pause = False\n else: \n print(f\"Cannot stop video: No video is currently playing\")\n\n # print(\"stop_video needs implementation\")", "def test_no_video_image(self):\n edx_video_id = 'test1'\n get_videos_url = reverse_course_url('videos_handler', self.course.id)\n video_image_upload_url = self.get_url_for_course_key(self.course.id, {'edx_video_id': edx_video_id})\n with make_image_file(\n dimensions=(settings.VIDEO_IMAGE_MIN_WIDTH, settings.VIDEO_IMAGE_MIN_HEIGHT),\n ) as image_file:\n self.client.post(video_image_upload_url, {'file': image_file}, format='multipart')\n\n val_image_url = get_course_video_image_url(course_id=self.course.id, edx_video_id=edx_video_id)\n\n response = self.client.get_json(get_videos_url)\n self.assertEqual(response.status_code, 200)\n response_videos = json.loads(response.content.decode('utf-8'))[\"videos\"]\n for response_video in response_videos:\n if response_video['edx_video_id'] == edx_video_id:\n self.assertEqual(response_video['course_video_image_url'], val_image_url)\n else:\n self.assertEqual(response_video['course_video_image_url'], None)", "def get_embed_url(self):\n if not self.get_video_id():\n return ''\n \n if not self.embed_url:\n self.embed_url = 
'https://www.youtube.com/embed/%s?wmode=transparent' % self.get_video_id()\n \n return self.embed_url", "def test_api_video_delete_list_anonymous(self):\n video = factories.VideoFactory()\n\n response = self.client.delete(\"/api/videos/\")\n\n self.assertEqual(response.status_code, 401)\n self.assertTrue(models.Video.objects.filter(id=video.id).exists())", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def delete_video(event_id, video_id):\n event = Event.query.get_or_404(event_id)\n if not current_user.is_organizer(event) and not current_user.is_administrator():\n return redirect(url_for(\"main.index\"))\n video = Video.query.get_or_404(video_id)\n db.session.delete(video)\n db.session.commit()\n flash(\"Your video has been deleted.\", \"success\")\n return redirect(url_for(\"events.media\", id=event_id))", "def delete(self, video_id, subvideo_name):\n\n video = Video.query.get(video_id)\n if not video:\n return {'message': 'video entry not exist'}, http.HTTPStatus.NOT_FOUND\n videofile = VideoFile.query.filter_by(name=subvideo_name).first()\n if videofile:\n videofile.delete()\n else:\n return {'message': 'no related video file'}, http.HTTPStatus.NOT_FOUND\n\n return {'message': 'delete success'}, http.HTTPStatus.NO_CONTENT", "def remove_from_playlist(self, playlist_name, video_id):\n if playlist_name.lower() not in self._playlists:\n print(f\"Cannot remove video from {playlist_name}: Playlist does not exist\")\n return\n playlist = self._playlists[playlist_name.lower()]\n video = self._video_library.get_video(video_id)\n if not video:\n print(f\"Cannot remove video from {playlist_name}: Video does not exist\")\n return\n if video not in playlist.videos:\n print(f\"Cannot remove video from {playlist_name}: Video is not in playlist\")\n return\n print(f\"Removed video from {playlist_name}: {video.title}\")\n playlist.videos.remove(video)", "def __init__(self, url, params=None):\n super(YoutubeVideo, self).__init__(url, params)\n self.video_id = self.get_video_id()", "def clear_thumbnails(self):", "def clear_playlist(self, playlist_name):\n if playlist_name.lower() not in self._playlists:\n print(f\"Cannot clear playlist {playlist_name}: Playlist does not exist\")\n return\n print(f\"Successfully removed all videos from {playlist_name}\")\n self._playlists[playlist_name.lower()].videos = []", "def failed_to_find_relevant_youtube_video(track_name):\n print('YouTube Service search for {} did not bring back an appropriate video.'.format(track_name))\n pass", "def remove_base64(examples: List[TaskType]) -> List[TaskType]:\n for eg in examples:\n if \"audio\" in eg and eg[\"audio\"].startswith(\"data:\") and \"path\" in eg:\n eg[\"audio\"] = eg[\"path\"]\n if \"video\" in eg and eg[\"video\"].startswith(\"data:\") and \"path\" in eg:\n eg[\"video\"] = eg[\"path\"]\n return examples", "async def _apply_media_retention_rules(self) -> None:\n # Purge remote media\n if self._media_retention_remote_media_lifetime_ms is not None:\n # Calculate a threshold timestamp derived from the configured lifetime. 
Any\n # media that has not been accessed since this timestamp will be removed.\n remote_media_threshold_timestamp_ms = (\n self.clock.time_msec() - self._media_retention_remote_media_lifetime_ms\n )\n\n logger.info(\n \"Purging remote media last accessed before\"\n f\" {remote_media_threshold_timestamp_ms}\"\n )\n\n await self.delete_old_remote_media(\n before_ts=remote_media_threshold_timestamp_ms\n )\n\n # And now do the same for local media\n if self._media_retention_local_media_lifetime_ms is not None:\n # This works the same as the remote media threshold\n local_media_threshold_timestamp_ms = (\n self.clock.time_msec() - self._media_retention_local_media_lifetime_ms\n )\n\n logger.info(\n \"Purging local media last accessed before\"\n f\" {local_media_threshold_timestamp_ms}\"\n )\n\n await self.delete_old_local_media(\n before_ts=local_media_threshold_timestamp_ms,\n keep_profiles=True,\n delete_quarantined_media=False,\n delete_protected_media=False,\n )", "def media_content_id(self) -> str | None:\n # The lovelace app loops media to prevent timing out, don't show that\n if self.app_id == CAST_APP_ID_HOMEASSISTANT_LOVELACE:\n return None\n media_status = self._media_status()[0]\n return media_status.content_id if media_status else None", "def bb_youtube(hit):\n video = hit.group(1)\n return '<object width=\"425\" height=\"350\"><param name=\"movie\" value=\"http://www.youtube.com/v/%s\"></param><param name=\"wmode\" value=\"transparent\"></param><embed src=\"http://www.youtube.com/v/%s\" type=\"application/x-shockwave-flash\" wmode=\"transparent\" width=\"425\" height=\"350\"></embed></object>' % (video, video)", "def test_info_id_without_data(self):\n self.headers(academy=1)\n model = self.generate_models(authenticate=True,\n profile_academy=True,\n capability='read_media_resolution',\n role='potato')\n url = reverse_lazy('media:info_id_resolution', kwargs={'media_id': 1})\n response = self.client.get(url)\n json = response.json()\n\n self.assertEqual(json, {'detail': 'media-not-found', 'status_code': 404})\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertEqual(self.all_media_dict(), [])", "def remove_from_playlist(self, playlist_name, video_id):\n playlist_exists = False\n video_id_exists = False\n video_exists_in_playlist = False\n for playlist in list(self.playlists.keys()):\n if playlist_name.upper() == playlist.upper():\n playlist_exists = True\n real_playlist_name = playlist\n break\n \n videos = self._video_library.get_all_videos()\n for v in videos:\n if v.video_id.upper() == video_id.upper():\n video_id_exists = True\n video_title = v.title\n break\n if not playlist_exists:\n print(f\"Cannot remove video from {playlist_name}: Playlist does not exist\")\n \n elif not video_id_exists:\n print(f\"Cannot remove video from {playlist_name}: Video does not exist\")\n\n elif video_id not in self.playlists[real_playlist_name]:\n print(f\"Cannot remove video from {playlist_name}: Video is not in playlist\")\n \n else:\n self.playlists[real_playlist_name].remove(video_id.lower())\n print(f\"Removed video from {playlist_name}: {video_title}\")\n # print(\"remove_from_playlist needs implementation\")", "def extract_media_v1(data):\n user = data[\"user\"]\n location = data.get(\"location\")\n if location:\n location = {\"pk\": int(location.get(\"pk\")), \"name\": location.get(\"name\")}\n video_url = \"\"\n if \"video_versions\" in data:\n # Select Best Quality by Resolutiuon\n video_url = sorted(\n data[\"video_versions\"], key=lambda o: o[\"height\"] * 
o[\"width\"]\n ).pop()[\"url\"]\n product_type = data.get(\"product_type\", \"\")\n if data[\"media_type\"] == 2 and not product_type:\n product_type = \"feed\"\n thumbnail_url = ''\n if 'image_versions2' in data:\n thumbnail_url = sorted(\n data[\"image_versions2\"][\"candidates\"],\n key=lambda o: o[\"height\"] * o[\"width\"],\n ).pop()[\"url\"]\n return {\n \"pk\": int(data[\"pk\"]),\n \"taken_at\": int(data[\"taken_at\"]),\n \"id\": data[\"id\"],\n \"media_type\": data[\"media_type\"],\n \"product_type\": product_type,\n \"code\": data[\"code\"],\n \"thumbnail_url\": thumbnail_url,\n \"location\": location,\n \"user\": extract_user_short(user),\n \"comment_count\": int(data.get(\"comment_count\") or 0),\n \"like_count\": int(data.get(\"like_count\") or 0), # the media just published has no like_count\n \"caption_text\": json_value(data, \"caption\", \"text\", default=\"\"),\n \"usertags\": [\n extract_usertag(usertag)\n for usertag in data.get(\"usertags\", {}).get(\"in\", [])\n ],\n \"video_url\": video_url,\n \"view_count\": int(data.get('view_count') or 0),\n \"video_duration\": data.get('video_duration'),\n \"title\": data.get(\"title\") or None,\n \"resources\": [\n extract_resource_v1(edge)\n for edge in data.get('carousel_media', [])\n ]\n }", "def test_api_video_delete_by_playlist_instructor(self):\n user = factories.UserFactory()\n playlist = factories.PlaylistFactory()\n factories.PlaylistAccessFactory(\n role=models.INSTRUCTOR, playlist=playlist, user=user\n )\n video = factories.VideoFactory(playlist=playlist)\n\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(user.id)\n jwt_token.payload[\"user\"] = {\n \"id\": str(user.id),\n \"username\": user.username,\n }\n\n self.assertEqual(models.Video.objects.count(), 1)\n\n response = self.client.delete(\n f\"/api/videos/{video.id}/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(models.Video.objects.count(), 1)\n self.assertEqual(response.status_code, 403)", "def clear_playlist(self, playlist_name):\n if playlist_name.lower() not in self.playlists:\n print(\"Cannot clear playlist\", playlist_name, end=\"\")\n print(\": Playlist does not exist\")\n else:\n print(\"Successfully removed all videos from\", playlist_name)\n self.playlists[playlist_name.lower()].clear()", "def test_delete_watchlist_no_id(self):\n first_name = \"Darth\"\n last_name = \"Vader\"\n username = \"darkside\"\n email = \"darth@darkside.com\"\n password = \"force1234\"\n\n payload_register = json.dumps({\n \"first_name\": first_name,\n \"last_name\": last_name,\n \"username\": username,\n \"email\": email,\n \"password\": password\n })\n\n response = self.app.post('/api/auth/register',\n headers={\"Content-Type\": \"application/json\"},\n data=payload_register)\n\n payload_entry = json.dumps({\n \"poster\": \"https://movie.poster.com\",\n \"media_type\": \"movies\",\n \"title\": \"Ad Astra\",\n \"description\": \"The near future, a time\"\n + \"when both hope and hardships drive humanity\"\n + \"to look to the stars and beyond. 
While a\"\n + \"mysterious phenomenon menaces to destroy life\"\n + \"on planet Earth, astronaut Roy McBride undertakes\"\n + \"a mission across the immensity of space and its many\"\n + \"perils to uncover the truth about a lost expedition that\"\n + \"decades before boldly faced emptiness and silence in\"\n + \"search of the unknown.\",\n \"year\": \"2019\",\n \"release_date\": \"2019-09-17\",\n \"imdb_id\": \"tt2935510\",\n \"imdb_rating\": \"6.1\",\n \"vote_count\": \"4394\",\n \"popularity\": \"44.602\",\n \"youtube_trailer_key\": \"BsCNKuB93BA\",\n \"runtime\": 123,\n \"stars\": [\n \"Brad Pitt\",\n \"Tommy Lee Jones\",\n \"Ruth Negga\",\n \"John Ortiz\",\n \"Liv Tyler\"\n ],\n \"directors\": [\n \"Dan Bradley\",\n \"James Gray\",\n \"Sharron Reynolds-Enriquez\",\n \"Doug Torres\",\n \"Christina Fong\",\n \"Mark Valenzuela\"\n ],\n \"creators\": []\n })\n\n response = self.app.post(\n '/api/watchlists',\n headers={\"Content-Type\": \"application/json\"},\n data=payload_entry)\n\n response = self.app.get(\n '/api/watchlists',\n headers={\"Content-Type\": \"application/json\"})\n\n response = self.app.delete(\n '/api/watchlist/' + \"notvalid\",\n headers={\"Content-Type\": \"application/json\"})\n\n self.assertEqual(\"Entry with given id doesn't exist\",\n response.json['message'])\n self.assertEqual(400, response.status_code)", "def test_item_update_with_sms_going_away(self):\n v1, = set_resources_and_sync([make_video(media_id='1234')])\n i1 = mpmodels.MediaItem.objects.get(jwp__key=v1.key)\n self.assertEqual(i1.sms.id, 1234)\n\n # Simulate a SMS delete\n del v1['custom']['sms_media_id']\n v1['updated'] += 1\n set_resources_and_sync([v1])\n\n # SMS object should've been deleted\n i1_v2 = mpmodels.MediaItem.objects.get(jwp__key=v1.key)\n self.assertFalse(hasattr(i1_v2, 'sms'))\n self.assertEqual(legacymodels.MediaItem.objects.filter(id=1234).count(), 0)", "def clear_playlist(self, playlist_name):\n print(\"clears_playlist needs implementation\")" ]
[ "0.7022397", "0.66042817", "0.61575353", "0.5879848", "0.56859034", "0.56468034", "0.5629039", "0.5511188", "0.5497184", "0.54881966", "0.54724157", "0.5448994", "0.5418817", "0.541001", "0.54045886", "0.53989905", "0.5339894", "0.5189437", "0.51847184", "0.51762545", "0.5173873", "0.5163507", "0.51614904", "0.51211625", "0.5115929", "0.5114129", "0.5113201", "0.5089893", "0.50760436", "0.5073908", "0.50673664", "0.5060285", "0.5017841", "0.5017081", "0.50165695", "0.49887913", "0.49706957", "0.49706817", "0.49694294", "0.49606332", "0.49591017", "0.49448714", "0.49276286", "0.4916497", "0.49047154", "0.48965392", "0.48874107", "0.48846942", "0.48833945", "0.48666438", "0.48542306", "0.48526153", "0.48410928", "0.4828508", "0.4822432", "0.4820213", "0.48137718", "0.48086298", "0.48060563", "0.4802534", "0.47952268", "0.47952268", "0.4788377", "0.47861856", "0.47722313", "0.47703466", "0.47561935", "0.4754721", "0.47532704", "0.4745845", "0.47373366", "0.4733029", "0.4730682", "0.47268915", "0.4725792", "0.47243625", "0.47228026", "0.47205022", "0.47205022", "0.47205022", "0.47205022", "0.47139418", "0.47137374", "0.4713599", "0.47089446", "0.47078755", "0.4693131", "0.46865633", "0.46815056", "0.46803918", "0.46711677", "0.46672487", "0.4664634", "0.46620125", "0.46584857", "0.4653702", "0.4650674", "0.4645856", "0.46215034", "0.4617117" ]
0.8092883
0
If a media resource has an image, its type property should indicate it's an image.
def test_type_image(image):
    resource = models.MediaResource(image=image)
    assert resource.type == models.MediaResource.TYPE_IMAGE
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_image(content_type):\n return content_type == \"image/jpeg\" or content_type == \"image/png\"", "def is_image(self):\r\n # we can only get this if we have headers\r\n LOG.debug('content type')\r\n LOG.debug(self.content_type)\r\n if (self.content_type is not None and\r\n self.content_type.lower() in IMAGE_TYPES.values()):\r\n return True\r\n else:\r\n return False", "def has_media(self):\r\n if self.image:\r\n return True\r\n return False", "def IsImage(self, filename):\n mimetype = mimetypes.guess_type(filename)[0]\n if not mimetype:\n return False\n return mimetype.startswith(\"image/\")", "def IsImage(self, filename):\r\n mimetype = mimetypes.guess_type(filename)[0]\r\n if not mimetype:\r\n return False\r\n return mimetype.startswith(\"image/\")", "def picture(result):\n media = result.entities.get('media')\n if media:\n return media[0].get('type') == u'photo'\n return False", "def image_media_type(name):\n return name.endswith(('.png', '.jpg', '.jpeg', '.gif', '.tiff', '.tif',\n '.svg'))", "def test_badge_should_have_image(self):\n\n badge = self.get_sample_badge()\n # It's a string, even though it often looks like a URL\n self.assertIsInstance(badge.image, str)", "def is_url_image(image_url):\n image_formats = (\"image/png\", \"image/jpeg\", \"image/jpg\")\n r = requests.head(image_url)\n logger.info(f'{image_url} has content type {r.headers[\"content-type\"]}')\n if r.headers[\"content-type\"] in image_formats:\n return True\n return False", "def image():\n\n headers = get_headers()\n if \"accept\" not in headers:\n return image_png() # Default media type to png\n\n accept = headers[\"accept\"].lower()\n\n if \"image/webp\" in accept:\n return image_webp()\n elif \"image/svg+xml\" in accept:\n return image_svg()\n elif \"image/jpeg\" in accept:\n return image_jpeg()\n elif \"image/png\" in accept or \"image/*\" in accept or \"*/*\" in accept:\n return image_png()\n else:\n return status_code(406) # Unsupported media type", "def test_restrict_mediatype():\r\n counter = image_hook_counter()\r\n ADDINS = [feed_image_restrict_mediatypes(('image/png', 'image/gif')), counter]\r\n\r\n class TestFeedImage(feedev.File):\r\n content = \"\"\r\n def headers(p):\r\n if p == 1: return {'Content-Type': 'text/plain'}\r\n elif p == 2: return {'Content-Type': 'image/jpeg'}\r\n elif p == 3: return {'Content-Type': 'image/png; charset=ISO-8859-1'} # charsets are correctly parsed out\r\n elif p == 4: return {'Content-Type': 'image/png'}\r\n\r\n class TestFeed(feedev.Feed):\r\n content = FeedWithImage % (TestFeedImage.url)\r\n\r\n def pass1(feed):\r\n assert counter.success == 0\r\n def pass2(feed):\r\n assert counter.success == 0\r\n def pass3(feed):\r\n assert counter.success == 1\r\n def pass4(feed):\r\n assert counter.success == 2\r\n\r\n feedev.testcaller()", "def isImage(imgref):\n if (imgref.endswith(\"JPG\")):\n return True\n if (imgref.endswith(\"jpg\")):\n return True\n if (imgref.endswith(\"gif\")):\n return True\n if (imgref.endswith(\"png\")):\n return True\n return False", "def has_image(self):\n return hasattr(self, \"_image\") and self._image is not None", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, 
\"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def imageType(self):\n return self.__imageType", "def image_type(self) -> ImageType:\n return self._image_type", "def images_media_filter(hash_str, mime_type):\n return mime_type in MIME_TO_EXTESION_MAPPING", "def except_image_only(resource):\n if resource.image is None:\n raise FeatureExtractionError(resource, 400, 'Image resource is required')\n if resource.mask:\n raise FeatureExtractionError(resource, 400, 'Mask resource is not accepted')\n if resource.gobject:\n raise FeatureExtractionError(resource, 400, 'Gobject resource is not accepted')", "def type(self):\n return _image.image_type(self)", "def get_image(article):\n image_url = None\n media = article.get('media', None)\n if media is not None:\n for m in media:\n media_type = m['media_type'].get('name', None) \n if media_type == 'image':\n image_url = m['url']\n break\n \n return image_url", "def config_media_type(self):\n pass", "def config_media_type(self):\n pass", "def media_type(self):\n return self._media_type", "def _set_content_type(cls, request, response, _action, with_request):\n e = webob.exc.HTTPOk(headerlist=[('Content-Type', '')])\n img = _action(request, e) if with_request else _action()\n e.body = img\n\n content_type = e.content_type\n if not content_type:\n # If no ``Content-Type`` is already set, use the ``imghdr`` module\n # to guess the format of the image\n content_type = 'image/' + (imghdr.what(None, img[:32]) or '*')\n e.content_type = content_type\n\n raise e", "def medium_image(self) -> Optional[str]:\n return pulumi.get(self, \"medium_image\")", "def get_type(ext):\n if ext.lower() in Asset.SUPPORTED_IMAGE_EXT['in']:\n return 'image'\n return 'file'", "def __image_ext_content_type(ext):\n if ext not in spec.default_content_types:\n tmpl = \"unsupported image file extension '%s'\"\n raise TypeError(tmpl % (ext))\n content_type = spec.default_content_types[ext]\n if not content_type.startswith('image/'):\n tmpl = \"'%s' is not an image content type; ext '%s'\"\n raise TypeError(tmpl % (content_type, ext))\n return content_type", "def image(self):\n image_id = self.attrs.get('ImageID', self.attrs['Image'])\n if image_id is None:\n return None\n return self.client.images.get(image_id.split(':')[1])", "def check_type(filename):\n try:\n im = Image.read(filename)\n except SanperaError:\n return False\n else:\n return im.original_format 
in [b'JPEG', b'PNG', b'GIF']", "def image_url(self):\n return self.photo_url or GENERIC_IMAGE", "def prepare_media_file_type(self, object):\n if object.media is not None:\n return object.media.media_file_type\n else:\n return ''", "def uses_media(self):\n return True", "def coerce_image(image):\n\n if not image:\n if hasattr(settings, 'BETTY_DEFAULT_IMAGE') and settings.BETTY_DEFAULT_IMAGE != None:\n # If we have a default image, let's use that.\n return AnonymousImageField(settings.BETTY_DEFAULT_IMAGE)\n else:\n return None\n\n if not isinstance(image, ImageFieldFile):\n # If this isn't an ImageField, coerce it\n try:\n image_id = int(image)\n except:\n if settings.BETTY_DEFAULT_IMAGE:\n image_id = settings.BETTY_DEFAULT_IMAGE\n else:\n return None\n image = AnonymousImageField(image_id)\n\n return image", "def image(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"image\")", "def img_ok(update, send_msg=True):\n if update.message.document:\n if not update.message.document.mime_type.startswith(\"image\"):\n if send_msg:\n status_usr_msg(update=update, status='invalid', obj='image')\n img = None\n else:\n img = update.message.document\n else:\n img = update.message.photo[-1]\n if img:\n if img.file_size > MAX_FILESIZE_DOWNLOAD:\n if send_msg:\n status_usr_msg(update, 'too large', 'image')\n img = None\n if img and send_msg:\n status_usr_msg(update, 'ok', 'image')\n return img", "def media_images_format(self, media_images_format):\n\n self._media_images_format = media_images_format", "def test_clean_only_image(image):\n resource = models.MediaResource(image=image)\n\n resource.clean()", "def test_invalid_filetype(self):\n rv = self.post('/queue/',\n content={'image': (StringIO('This is not an image'),\n 'text.txt')},\n token=self.user_token)\n self.assertJSONError(rv, 'TagalleryInvalidFileExtension')\n return", "def media_type(self) -> str:\n return self._media_type", "def test_image_display(self):\n\n result = self.client.get(\"/select_image\")\n\n self.assertIn(b\"/static/uploads/girl-glowing-skin-blue-eyes.jpg\", result.data)", "def test_read_namespaced_image_stream_image(self):\n pass", "def validate_image(image):\r\n\r\n # sets fallback URL and image type\r\n fallback_URL = url_for('static', filename='images/default.png')\r\n img_type = ('image/jpeg', 'image/bmp', 'image/png', 'image/gif')\r\n\r\n try:\r\n # tries to get a response and the content type of the URL\r\n resp = requests.get(image)\r\n r = resp.headers.get('content-type')\r\n\r\n # tests if the URL is valid and image\r\n if resp.status_code == 200:\r\n # URL is valid\r\n if r in img_type:\r\n # URL is image\r\n return image\r\n\r\n else:\r\n # URL is not an image\r\n return fallback_URL\r\n\r\n else:\r\n # URL is invalid\r\n return fallback_URL\r\n\r\n except:\r\n # URL is invalid\r\n return fallback_URL", "def image_type(self, image_type: ImageType):\n\n self._image_type = image_type", "def validate_image_type(filename: str) -> bool:\n supported_extensions = (\"png\", \"jpg\", \"jpeg\")\n return (filename not in (None, \"\")) and (get_extension(filename) in supported_extensions)", "def _add_image(self, image):\n document = self._control.document()\n name = str(image.cacheKey())\n document.addResource(QtGui.QTextDocument.ImageResource,\n QtCore.QUrl(name), image)\n format = QtGui.QTextImageFormat()\n format.setName(name)\n return format", "def test_fetch_or_create_disk_media_item_with_image(db):\n data = datadir.join('1200x6566.png').read(mode='rb')\n im = images.from_buffer(data)\n item = 
media.fetch_or_create_media_item(data, im=im)\n assert item.file_type == 'png'\n assert item.attributes == {'width': 1200, 'height': 6566}", "def OnGetItemImage(self, item):\n\n return 0", "def modify_image_url(image_url, type=''):\n parsed_uri = urlparse(image_url)\n if parsed_uri.scheme == 'https' or parsed_uri.scheme == 'http':\n pass\n elif image_url == '':\n image_url = '/media/default_' + type + '.jpg'\n else:\n image_url = '/media/' + image_url\n return image_url", "def is_image(mine=None, file=None):\n if file:\n mine = get_file_mine(file)\n print(mine)\n if mine:\n return mine.find('image') != -1\n\n return False", "def _schema_type(self) -> Optional[type]:\n return ImageSchema", "def __contains__(self, image: Any) -> bool:\n return isinstance(image, self.native_image_type)", "def image(self):\n return self.__getattr__(\"image\", _returnType=\"value\", _location=\"remote\")", "def test_should_file_field(self):\n self.assertIn(\"image\", self.fields)", "def image(self, image):\n # type: (string_types) -> None\n\n if image is not None:\n if not isinstance(image, string_types):\n raise TypeError(\"Invalid type for `image`, type has to be `string_types`\")\n\n self._image = image", "def hasImage(self):\n return self._image is not None", "async def async_get_media_image(self) -> tuple[bytes | None, str | None]:\n if self._client.current_track:\n image = bytes(self._client.current_track[\"art\"])\n return (image, \"image/png\")\n\n return None, None", "def media(self):\n return Edition.additional_type_to_medium", "def media_image_url(self):\n return self._current_item[\"image\"]", "def img_urls(self, media, type = \"low_resolution\"):\n\n imgs = {}\n\n for item in media:\n if item[\"type\"] != \"image\":\n continue\n\n imgs[item[\"id\"]] = item[\"images\"][type][\"url\"]\n\n return imgs", "def get_image ( self, object ):\n return self.image", "def _set_image(\n meta: Dict,\n image: Optional[\"AttachmentProperty\"] = None,\n image_fit: Optional[Union[str, ImageFitValue]] = ImageFitValue.CONTAIN,\n **kwargs,\n) -> Dict:\n meta[MetaWidget.IMAGE_FIT] = check_enum(image_fit, ImageFitValue, \"image_fit\")\n\n from pykechain.models import Property\n\n if isinstance(image, Property) and image.type == PropertyType.ATTACHMENT_VALUE:\n meta.update(\n {\n MetaWidget.CUSTOM_IMAGE: f\"/api/v3/properties/{image.id}/preview\",\n MetaWidget.SHOW_IMAGE_VALUE: CardWidgetImageValue.CUSTOM_IMAGE,\n }\n )\n elif image is None:\n meta.update(\n {\n MetaWidget.CUSTOM_IMAGE: None,\n MetaWidget.SHOW_IMAGE_VALUE: CardWidgetImageValue.NO_IMAGE,\n }\n )\n else:\n raise IllegalArgumentError(\n \"When using the add_card_widget or add_service_card_widget, 'image' must be an \"\n \"'AttachmentProperty' or None. 
Type is: {}\".format(type(image))\n )\n return meta", "def test_guess_content_type(self):\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n\n src = os.path.join(self.upload_path, 'test.jpg')\n id = utils.generate_id('demo.jpg')\n backend.put(src, id, True)\n\n path = '/'.join(backend.id_to_path(id)) + '/demo.jpg'\n client = boto3.client('s3', **backend.credentials)\n res = client.head_object(\n Bucket=backend.bucket_name,\n Key=path\n )\n headers = res['ResponseMetadata']['HTTPHeaders']\n self.assertEquals('image/jpeg', headers['content-type'])", "def python_type(self) -> type:\n return ImageEntity", "def isPicture(file): \n return os.path.splitext(file)[1][1:].lower() in ['jpg', 'jpeg', 'gif', 'png', 'tif', 'tiff', 'bmp']", "def native_image_type(self) -> Type[Any]:\n pass", "def image_url(self, url):\n return self.is_regex_url(url, self.is_image_regex)", "def test_fetch_or_create_disk_media_item_with_image_and_attributes(db):\n data = datadir.join('1200x6566.png').read(mode='rb')\n im = images.from_buffer(data)\n item = media.fetch_or_create_media_item(data, file_type='png', im=im, attributes={'spam': 'eggs'})\n assert item.attributes == {'spam': 'eggs', 'width': 1200, 'height': 6566}", "def test_images(self):\n\n message = {\"method\": \"images\", \"params\": {\"elem\": None}}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"images\")\n self.assertIsInstance(response[\"result\"], list)\n\n images = [i[\"tag\"] for i in response[\"result\"]]\n\n self.assertIn(self.tag_image, images)", "def get_type(self):\n\n if self.type: return self.type\n if str(self.get_ext()) in str(MIMETYPES_VIDEOS_LIST):\n self.type = Video()\n elif str(self.get_ext()) in str(MIMETYPES_IMAGES_LIST):\n self.type = Image()\n else: Settings.warn_print(\"unable to parse file type\")\n return self.type", "def test_empty_image(self):\n r = post(self.client, 'upload.up_image_async', {'image': ''},\n args=['questions.Question', 1])\n\n eq_(400, r.status_code)\n json_r = json.loads(r.content)\n eq_('error', json_r['status'])\n eq_('Invalid or no image received.', json_r['message'])\n eq_('You have not selected an image to upload.',\n json_r['errors']['image'][0])", "def imagesave(description):\n if not description:\n # if description is empty for any reason, it has no images.\n return False\n\n description = json.loads(description)\n\n for block in description['data']:\n if block['type'] == 'image508':\n imagepath = block['data']['file']['path']\n\n desc = block['data']['image_description']\n\n thisimage = Media.objects.filter(file=imagepath).first()\n if thisimage is None:\n thisimage = Media(file=imagepath)\n thisimage.title = block['data']['image_title']\n thisimage.mediatype = Media.IMAGE\n thisimage.description = desc\n\n thisimage.save()\n\n return True", "def test_invalid_image(self):\n with open('apps/upload/__init__.py', 'rb') as f:\n r = post(self.client, 'upload.up_image_async', {'image': f},\n args=['questions.Question', 1])\n\n eq_(400, r.status_code)\n json_r = json.loads(r.content)\n eq_('error', json_r['status'])\n eq_('Invalid or no image received.', json_r['message'])\n eq_('The submitted file is empty.', json_r['errors']['image'][0])", "def test_invalid_image_extensions(self):\n with open('apps/upload/tests/media/test_invalid.ext', 'rb') as f:\n r = post(self.client, 'upload.up_image_async', {'image': f},\n args=['questions.Question', 1])\n\n eq_(400, r.status_code)\n json_r = json.loads(r.content)\n 
eq_('error', json_r['status'])\n eq_('Invalid or no image received.', json_r['message'])\n eq_('Please upload an image with one of the following extensions: '\n 'jpg, jpeg, png, gif.', json_r['errors']['__all__'][0])", "def _media_processing(self):\n self.set_property('processing_type', 'media')", "def media_image_url(self):\n if (media_status := self._media_status()[0]) is None:\n return None\n\n images = media_status.images\n\n return images[0].url if images and images[0].url else None", "def set_image(self, **kwargs):\n self.image = kwargs.get('url')", "def _upload_media(\n self,\n *,\n expected_mime_prefix: str | None = None,\n media_path: str,\n upload_type: str = \"link\",\n ):\n if media_path is None:\n file = Path(__file__).absolute()\n media_path = file.parent.parent.parent / \"images\" / \"PRAW logo.png\"\n else:\n file = Path(media_path)\n\n file_name = file.name.lower()\n file_extension = file_name.rpartition(\".\")[2]\n mime_type = {\n \"png\": \"image/png\",\n \"mov\": \"video/quicktime\",\n \"mp4\": \"video/mp4\",\n \"jpg\": \"image/jpeg\",\n \"jpeg\": \"image/jpeg\",\n \"gif\": \"image/gif\",\n }.get(\n file_extension, \"image/jpeg\"\n ) # default to JPEG\n if (\n expected_mime_prefix is not None\n and mime_type.partition(\"/\")[0] != expected_mime_prefix\n ):\n msg = f\"Expected a mimetype starting with {expected_mime_prefix!r} but got mimetype {mime_type!r} (from file extension {file_extension!r}).\"\n raise ClientException(msg)\n img_data = {\"filepath\": file_name, \"mimetype\": mime_type}\n\n url = API_PATH[\"media_asset\"]\n # until we learn otherwise, assume this request always succeeds\n upload_response = self._reddit.post(url, data=img_data)\n upload_lease = upload_response[\"args\"]\n upload_url = f\"https:{upload_lease['action']}\"\n upload_data = {item[\"name\"]: item[\"value\"] for item in upload_lease[\"fields\"]}\n\n response = self._read_and_post_media(file, upload_url, upload_data)\n if not response.ok:\n self._parse_xml_response(response)\n try:\n response.raise_for_status()\n except HTTPError as err:\n raise ServerError(response=err.response) from None\n\n websocket_url = upload_response[\"asset\"][\"websocket_url\"]\n\n if upload_type == \"link\":\n return f\"{upload_url}/{upload_data['key']}\", websocket_url\n return upload_response[\"asset\"][\"asset_id\"], websocket_url", "def media_image_url(self):\n return self._imageUrl", "def media_image_url(self):\n return self._media_image_url" ]
[ "0.71367157", "0.6835922", "0.6663885", "0.6654667", "0.6619548", "0.648332", "0.6373138", "0.6143732", "0.6071515", "0.59954345", "0.5967518", "0.596137", "0.59485954", "0.5918796", "0.5918796", "0.5918796", "0.5918796", "0.5918796", "0.5918796", "0.5918796", "0.5918796", "0.5918796", "0.5918796", "0.5918796", "0.5918796", "0.5918796", "0.5918796", "0.5918796", "0.5918796", "0.5918796", "0.5918796", "0.5918796", "0.5918796", "0.5918796", "0.5918796", "0.5825869", "0.58213276", "0.57909465", "0.57855016", "0.5784638", "0.5772991", "0.57298553", "0.57298553", "0.57254416", "0.57228017", "0.57180566", "0.571092", "0.56945485", "0.5666204", "0.56623966", "0.56548417", "0.5651278", "0.56505615", "0.5649174", "0.5624002", "0.56094456", "0.56012934", "0.55623955", "0.55597764", "0.5551419", "0.5549515", "0.5548107", "0.55364126", "0.55192286", "0.5517369", "0.55102265", "0.5503746", "0.550057", "0.5496708", "0.5481196", "0.5471836", "0.54701275", "0.5452185", "0.5446632", "0.5443276", "0.5442104", "0.54399484", "0.54314905", "0.54283535", "0.54219073", "0.5409043", "0.538825", "0.53754556", "0.53729904", "0.5372593", "0.5369604", "0.5368586", "0.53614604", "0.5358293", "0.53570616", "0.535013", "0.5346576", "0.53431237", "0.5340188", "0.5340047", "0.53371346", "0.53361696", "0.5332318", "0.53240925", "0.5304903" ]
0.7742908
0
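The rows on either side of this point test a MediaResource.type property that reports which kind of media is attached to the resource. The following minimal sketch, which is not part of the dataset, shows one way such a property could behave. It assumes a plain Python class in place of the project's actual Django model and an empty-string fallback when neither field is set; only the constant names TYPE_IMAGE and TYPE_YOUTUBE are taken from the tests themselves.

# Hypothetical sketch, not the project's real model.
class MediaResource:
    TYPE_IMAGE = "image"
    TYPE_YOUTUBE = "youtube"
    TYPE_NONE = ""  # assumed fallback; the neighbouring tests do not cover this case

    def __init__(self, image=None, youtube_id=""):
        self.image = image
        self.youtube_id = youtube_id

    @property
    def type(self):
        # Report the attached media kind: an image attachment wins if present,
        # otherwise a non-empty YouTube ID marks the resource as a YouTube video.
        if self.image:
            return self.TYPE_IMAGE
        if self.youtube_id:
            return self.TYPE_YOUTUBE
        return self.TYPE_NONE

# Usage mirroring the neighbouring test documents:
assert MediaResource(image=object()).type == MediaResource.TYPE_IMAGE
assert MediaResource(youtube_id="dQw4w9WgXcQ").type == MediaResource.TYPE_YOUTUBE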
If a media resource has a YouTube video ID, its type property should indicate it's a YouTube video.
def test_type_youtube():
    resource = models.MediaResource(youtube_id="dQw4w9WgXcQ")
    assert resource.type == models.MediaResource.TYPE_YOUTUBE
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def play_youtube(self, media_id):\n pass", "def isYouTube(self):\n if 'youtube' in self.link.split('.'):\n return True\n return None", "def play_youtube(self, media_id):\n raise NotImplementedError()", "def on_play(self, event, type=\"yt\", content=None):\n urls = {\n \"https://www.youtube.com/watch?v=\": \"yt\",\n \"https://youtube.com/watch?v=\": \"yt\",\n \"https://youtu.be\": \"yt\",\n \"https://soundcloud.com\": \"sc\",\n } # /watch?v= /watch?v=\n search_prefixs = {\n \"youtube\": \"ytsearch:{}\",\n \"yt\": \"ytsearch:{}\",\n \"soundcloud\": \"scsearch:{}\",\n \"sc\": \"scsearch:{}\",\n }\n self.pre_check(event)\n if event.guild.id not in self.cool_down:\n self.cool_down[event.guild.id] = {}\n if (event.author.id not in self.cool_down[\"general\"] or\n time() - self.cool_down[\"general\"][event.author.id] >= 1):\n if (event.guild.id not in self.cool_down[\"playlist\"] or\n not self.cool_down[\"playlist\"][event.guild.id]):\n self.cool_down[\"general\"][event.author.id] = time()\n if event.guild.get_member(event.author).get_voice_state():\n self.on_join(event)\n self.same_channel_check(event)\n if type not in search_prefixs.keys():\n if type == \"override\":\n if event.author.id not in self.owners:\n return api_loop(\n event.channel.send_message,\n \"You don't own me\",\n )\n video_url = content\n url_found = True\n pass\n elif content is not None:\n content = \"{} {}\".format(type, content)\n type = \"yt\"\n else:\n content = type\n type = \"yt\"\n elif type in search_prefixs.keys() and content is None:\n return api_loop(\n event.channel.send_message,\n \"Search (content) argument missing.\",\n )\n if \"url_found\" not in locals():\n url_found = False\n for url, index in urls.items():\n if url in content:\n url_found = True\n video_url = content\n type = index\n if not url_found:\n if type in search_prefixs:\n video_url = search_prefixs[type].format(content)\n else:\n video_url = search_prefixs[\"yt\"].format(content)\n youtubedl_object = YoutubeDLInput(video_url, command=\"ffmpeg\")\n try:\n yt_data = self.get_ytdl_values(youtubedl_object.info)\n except DownloadError as e:\n return api_loop(\n event.channel.send_message,\n \"Video not avaliable: {}\".format(e),\n )\n if yt_data[\"is_live\"]:\n return api_loop(\n event.channel.send_message,\n \"Livestreams aren't supported\",\n )\n elif yt_data[\"duration\"] > 3620:\n return api_loop(\n event.channel.send_message,\n \"The maximum supported length is 1 hour.\",\n )\n self.get_player(event.guild.id).append(youtubedl_object)\n api_loop(\n event.channel.send_message,\n \"Added ``{}`` by ``{}`` using ``{}``.\".format(\n yt_data[\"title\"],\n yt_data[\"uploader\"],\n yt_data[\"source\"],\n ),\n )\n else:\n api_loop(\n event.channel.send_message,\n \"Currently adding playlist, please wait.\",\n )\n else:\n cool = round(\n Decimal(\n 1 - (time() - self.cool_down[\"general\"][event.author.id]),\n ),\n )\n api_loop(\n event.channel.send_message,\n \"Cool down: {} seconds left.\".format(cool),\n )", "def testVideoTrackType(self):\n\n trackLine = _buildTrackLine(0, 'video', {'hello': 'goodbye'})\n\n trackID, trackType, trackDict = tools._trackInfo(trackLine)\n\n self.assertEqual(\n 'video',\n trackType,\n )", "def testVideoTrackType(self):\n\n trackLine = _buildTrackLine(967, 'subtitles', {'hello': 'goodbye'})\n\n trackID, trackType, trackDict = tools._trackInfo(trackLine)\n\n self.assertEqual(\n 'subtitles',\n trackType,\n )", "def youtube(self):\n return self._youtube", "def _get_video_data(youtube_data, playlist=None):\n def 
get_category(categories):\n for category in categories:\n if category['scheme'].endswith('categories.cat'):\n return category['$t'] # TODO: map category\n media = youtube_data['media$group']\n video = Video(\n source_videoid=media['yt$videoid']['$t'],\n source_listid=playlist,\n source_username=media['media$credit'][0]['$t'],\n date_published=_parse_datetime(youtube_data['published']['$t']),\n title=youtube_data['title']['$t'],\n duration=int(media['yt$duration']['seconds']) if 'yt$duration' in media else -1,\n )\n video.source_category = get_category(media.get('media$category', []))\n video.source_view_count = int(youtube_data['yt$statistics']['viewCount']) if 'yt$statistics' in youtube_data else -1\n video.source_date_uploaded = media['yt$uploaded']['$t']\n access_control = dict(\n (i['action'], i['permission'] == 'allowed')\n for i in youtube_data.get('yt$accessControl', []))\n video.restricted = access_control.get('embed') is False\n if 'app$control' in youtube_data:\n if 'yt$incomplete' in youtube_data['app$control']:\n video.restricted = True\n else:\n state = youtube_data['app$control']['yt$state']\n if state['name'] == 'restricted':\n if state['reasonCode'] == 'limitedSyndication':\n # see https://groups.google.com/d/msg/youtube-api-gdata/on504fCOEk0/oErUbCptWu4J\n video.restricted = not any(c.get('yt$format') == 5 for c in\n media.get('media$content', []))\n else:\n video.restricted = True\n for thumbnail in media.get('media$thumbnail', []):\n if 'time' not in thumbnail:\n video.thumbnails.append(\n VideoThumbnail(\n url=thumbnail['url'],\n width=thumbnail['width'],\n height=thumbnail['height']))\n for restriction in media.get('media$restriction', []):\n if restriction['type'] == 'country':\n video.restrictions.extend(\n VideoRestriction(\n relationship=restriction['relationship'],\n country=country) for country in restriction['$t'].split())\n return video", "def validate_youtube(fragment):\n request=urllib.urlopen('https://www.youtube.com/watch?v=' + fragment)\n return request.getcode() == 200", "def getMedia(media_type, media_id):\n\n mediaURL = BASE_URL + media_type + \"/\" + str(media_id) + API_KEY\n videoURL = BASE_URL + media_type + \"/\" + str(media_id) + '/videos' + API_KEY\n\n # get the data from the API\n headers = {'Accept': 'application/json'}\n media_request = requests.get(mediaURL, headers=headers)\n video_request = requests.get(videoURL, headers=headers)\n\n # parse to json array\n media_response = media_request.json()\n video_response = video_request.json()\n\n # pull out desired attributes from json data\n data = {\n 'poster': 'http://image.tmdb.org/t/p/w500' + media_response[\"poster_path\"],\n 'title': media_response[\"title\"],\n 'storyline': media_response[\"overview\"],\n 'trailer': 'https://www.youtube.com/watch?v=' + video_response[\"results\"][0][\"key\"]\n }\n\n return data", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def get_video(self):\n if self.parsing_template.video and self.parsing_template.video in self.headline.url:\n return True\n return False", "def _get_activity_type(self, video):\n\n activity_type = \"https://w3id.org/xapi/video/activity-type/video\"\n\n # When the video is a live we change the activity to webinar\n if video.is_live:\n activity_type = 
\"http://id.tincanapi.com/activitytype/webinar\"\n\n return activity_type", "def is_video(self):\n val = False\n if self.__dict__['codec_type']:\n if self.__dict__['codec_type'] == 'video':\n val = True\n return val", "def __ext_embed_id(self, youtube_url):\n youtube_id_match = re.search(r'(?<=v=)[^&#]+', youtube_url)\n youtube_id_match = youtube_id_match or re.search(\n r'(?<=be/)[^&#]+', youtube_url)\n trailer_youtube_id = (youtube_id_match.group(0) if youtube_id_match\n else None)\n return trailer_youtube_id", "def test_get_video_id_from_url(self):\n self.assertEqual(get_video_id_from_url('https://www.youtube.com/embed/DqGwxR_0d1M'),\n 'DqGwxR_0d1M')\n self.assertEqual(get_video_id_from_url('https://youtu.be/DqGwxR_0d1M'), 'DqGwxR_0d1M')\n self.assertEqual(\n get_video_id_from_url('https://www.youtube.com/watch?v=DqGwxR_0d1M&feature=youtu.be'),\n 'DqGwxR_0d1M')\n self.assertEqual(get_video_id_from_url('https://www.youtube.com/watch?v=DqGwxR_0d1M'),\n 'DqGwxR_0d1M')", "def test_clean_only_youtube_id():\n resource = models.MediaResource(youtube_id=\"dQw4w9WgXcQ\")\n\n resource.clean()", "def google_youtube_check(id):\n\tif not API_KEY:\n\t\traise ConfigException(\"Require API_KEY for googleapi. Reload after setting.\")\n\td = {\"id\" : quote(id.encode(\"utf-8\")), \"part\" : \"id,status\", \"key\" : API_KEY}\n\t\n\tf = urlopen(YOUTUBE_INFO_URL % (urlencode(d)))\n\tytdata = load(f)\n\tif not ytdata.get(\"items\"): # if there are no items for the ID search, return False\n\t\treturn False\n\treturn True", "def test_list_media_type(self):\n\n # check if documentalist has access to list media-types\n self.login_documentalist()\n response = self.client.get('/multimedia/media-types/' )\n\n # 403 = unauthorized\n self.assertEqual(response.status_code, 403)\n\n self.client.logout()\n self.login_admin()\n\n response = self.client.get('/multimedia/media-types/')\n self.assertContains(response, \"Video\")", "def filter_yt(info: interceptor.Request):\n\turl = info.request_url\n\tif (url.host() == 'www.youtube.com' and\n\t\t\turl.path() == '/get_video_info' and\n\t\t\t'&adformat=' in url.query()):\n\t\tinfo.block()", "def get_embed_video_model_string():\n return getattr(settings, \"WAGTAILEMBEDVIDEOS_EMBEDVIDEO_MODEL\", \"wagtail_embed_videos.EmbedVideo\")", "def __init__(self, url, params=None):\n super(YoutubeVideo, self).__init__(url, params)\n self.video_id = self.get_video_id()", "def test_note_related_with_work_order_has_video_attachment(fake_note_with_video_attachment):\n\n note_id_value: str = str(uuid.uuid4())\n a_note = Note.from_dict(note_id_value, fake_note_with_video_attachment)\n assert a_note.has_video_attachment()\n assert a_note.is_document\n assert \"video\" in a_note.mime_type\n assert a_note.object_type == \"msdyn_workorder\"", "def compose_embed_youtube(video_id = None):\n assert(video_id != None)\n return \"http://www.youtube.com/embed/{0}?enablejsapi=1&wmode=opaque\".format(\n video_id\n )", "def media_content_type(self):\n return MEDIA_TYPE_MUSIC\n # return MEDIA_TYPE_PLAYLIST", "def has_video(self):\n return self.__video_format is not None", "def get_video_id(self):\n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n p = urlparse.urlparse(self.youtube_fix_url(self.original_url))\n if p.path == '/watch':\n # Url of type http://www.youtube.com/watch?v=KRaeHxwZvms&feature=g-u-u&context=G2b00124FUAAAAAAAAAA\n #logger.debug('is a watch')\n params = cgi.parse_qs(p.query)\n if 'v' in params:\n return params['v'][0]\n elif 
p.fragment.startswith('/watch?v='):\n # sample. http://m.youtube.com/#/watch?v=ZXkW1-HdRC8\n params = cgi.parse_qs(p.fragment)\n if '/watch?v' in params:\n return params['/watch?v'][0]\n elif p.path.startswith('/v/') or p.path.startswith('/embed/'):\n path = p.path.split('/')\n return path[-1]\n elif p.netloc == 'youtu.be':\n return p.path[1:]\n elif re.match('(.{1}/){3}([\\w+-_^/]+)', p.fragment):\n parts = p.fragment.split('/')\n return parts[-1]\n return ''", "def media_content_type(self) -> str | None:\n return self._device.movie.media_type", "async def youtube(self, ctx, *, query):\r\n\r\n utub = 'https://youtube.com/results?search_query='\r\n url = utub + query.replace(\" \", \"+\")\r\n r = requests.get(url).text\r\n num1 = r.find('{\"videoRenderer')\r\n num2 = r.find('{\"videoRenderer', num1+1)\r\n # print (num1)\r\n # print (num2)\r\n videoRenderer = (json.loads(r[num1:num2-1])[\"videoRenderer\"])\r\n vid = (videoRenderer[\"videoId\"])\r\n page = (\"https://youtube.com/watch?v=\" + vid)\r\n await ctx.send(page)", "def get_youtube_video_url(video_id):\n url = \"https://www.youtube.com/watch?v=\" + video_id\n return url", "def get_resource_id(self, obj):\n return obj.video.id", "async def async_play_media(\n self, media_type: MediaType | str, media_id: str, **kwargs: Any\n ) -> None:\n await self._volumio.replace_and_play(json.loads(media_id))", "def media_content_type(self) -> MediaType | None:\n # The lovelace app loops media to prevent timing out, don't show that\n if self.app_id == CAST_APP_ID_HOMEASSISTANT_LOVELACE:\n return None\n if (media_status := self._media_status()[0]) is None:\n return None\n if media_status.media_is_tvshow:\n return MediaType.TVSHOW\n if media_status.media_is_movie:\n return MediaType.MOVIE\n if media_status.media_is_musictrack:\n return MediaType.MUSIC\n\n chromecast = self._get_chromecast()\n if chromecast.cast_type in (\n pychromecast.const.CAST_TYPE_AUDIO,\n pychromecast.const.CAST_TYPE_GROUP,\n ):\n return MediaType.MUSIC\n\n return MediaType.VIDEO", "def get_yt_video(yt_url):\n\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }],\n 'outtmpl': '%(id)s.%(ext)s'\n }\n ydl = youtube_dl.YoutubeDL(ydl_opts)\n\n with ydl:\n result = ydl.extract_info(\n #'http://www.youtube.com/watch?v=BaW_jenozKc',\n yt_url,\n download=True # We just want to extract the info\n )\n\n if 'entries' in result:\n # Can be a playlist or a list of videos\n video = result['entries'][0]\n else:\n # Just a video\n video = result\n\n return video", "def isVideoFolder():", "def config_media_type(self):\n pass", "def config_media_type(self):\n pass", "def is_video(self):\n if self.settings.background_image is None:\n return False\n\n filename, _ = b64decode_file(self.settings.background_image)\n self.mimetype, _ = mimetypes.guess_type(filename)\n return 'video' in self.mimetype", "def clean_video_id(self):\n failed = False\n d = self.cleaned_data\n service = d.get('service')\n # Get the video id and clear whitespace on either side.\n video_id = d.get('video_id', '').strip()\n\n # Validate using YouTube's API:\n if service == 'youtube':\n url = ('http://gdata.youtube.com/feeds/api/videos/{}?alt=json'.\n format(video_id))\n data = requests.get(url)\n # Ensure we can parse the JSON data.\n try:\n json = simplejson.loads(data.text)\n # If not, mark this as a failure.\n except ValueError:\n failed = True\n\n # Validate using Vimeo's API:\n elif service == 'vimeo':\n data = 
requests.get('http://vimeo.com/api/v2/video/{}.json'.\n format(video_id))\n # Ensure we can parse the JSON data.\n try:\n json = simplejson.loads(data.text)\n # If not, mark this as a failure.\n except ValueError:\n failed = True\n\n # Respond based on the outcome.\n if failed:\n message = _(\"Couldn't validate video id using {} API. Please \"\n \"verify it exists and check for \"\n \"typos.\".format(service))\n raise forms.ValidationError(message)\n\n return video_id", "def video_details(video_id, check=True):\r\n url = VIDEO_DETAILS_URL.format(API_KEY, video_id)\r\n response = util.web.http_get(url=url, json=True, referer='https://tinychat.com')\r\n\r\n if response['json'] is not None:\r\n try:\r\n if 'items' in response['json']:\r\n if len(response['json']['items']) is not 0:\r\n contentdetails = response['json']['items'][0]['contentDetails']\r\n if check:\r\n if 'regionRestriction' in contentdetails:\r\n if 'blocked' in contentdetails['regionRestriction']:\r\n blocked = contentdetails['regionRestriction']['blocked']\r\n if [i for e in ALLOWED_COUNTRIES for i in blocked if e in i]:\r\n log.info('%s is blocked in: %s' %\r\n (video_id, blocked))\r\n return None\r\n if 'allowed' in contentdetails['regionRestriction']:\r\n allowed = contentdetails['regionRestriction']['allowed']\r\n if [i for e in ALLOWED_COUNTRIES for i in allowed if e not in i]:\r\n log.info('%s is allowed in: %s' %\r\n (video_id, allowed))\r\n return None\r\n video_time = util.string_util.convert_to_millisecond(contentdetails['duration'])\r\n video_title = response['json']['items'][0]['snippet']['title'].encode('ascii', 'ignore')\r\n\r\n return {\r\n 'type': 'youTube',\r\n 'video_id': video_id,\r\n 'video_time': video_time,\r\n 'video_title': video_title\r\n }\r\n return None\r\n except KeyError as ke:\r\n log.error(ke, exc_info=True)\r\n return None", "def check_if_embedded(lookup_value, lookup_type='content_id'):\n video_json = get_video_info(lookup_value, lookup_type)\n return video_json['embedded']", "def get_embed_youtube(link = None):\n assert(link != None)\n assert(link != \"\")\n log.debug( \"preparsed link: \" + link)\n video_id = \"\"\n try:\n # break the link\n choppedLink = link.split(\"/\")\n if choppedLink[2].find(\"youtu.be\") >= 0:\n # Parse short link getting only last piece\n video_id = get_id_shortlink(choppedLink)\n elif choppedLink[3].find(\"attribution_link\") >= 0 :\n # Its an attribution link, a bit special\n video_id = get_id_attribution(choppedLink)\n else:\n # This should be a regular link\n video_id = get_id_regular_link(choppedLink)\n\n # and finally compose the embed link\n flink = compose_embed_youtube(video_id)\n log.debug( \"compound link: \" + flink)\n except Exception as e:\n log.error(\"Something weird happened when ending getting embed youtube\")\n log.exception(e)\n raise NotImplementedError( \"We are still working on links like \" + link)\n\n return flink", "def video_id(self, video_id):\n # type: (string_types) -> None\n\n if video_id is not None:\n if not isinstance(video_id, string_types):\n raise TypeError(\"Invalid type for `video_id`, type has to be `string_types`\")\n\n self._video_id = video_id", "def url(yt_id: str) -> str:\n return \"https://www.youtube.com/watch?v={}\".format(yt_id)", "def bb_youtube(hit):\n video = hit.group(1)\n return '<object width=\"425\" height=\"350\"><param name=\"movie\" value=\"http://www.youtube.com/v/%s\"></param><param name=\"wmode\" value=\"transparent\"></param><embed src=\"http://www.youtube.com/v/%s\" 
type=\"application/x-shockwave-flash\" wmode=\"transparent\" width=\"425\" height=\"350\"></embed></object>' % (video, video)", "def get_yt_video_id(url):\n\n from urlparse import urlparse, parse_qs\n\n if url.startswith(('youtu', 'www')):\n url = 'http://' + url\n\n query = urlparse(url)\n\n if 'youtube' in query.hostname:\n if query.path == '/watch':\n return parse_qs(query.query)['v'][0]\n elif query.path.startswith(('/embed/', '/v/')):\n return query.path.split('/')[2]\n elif 'youtu.be' in query.hostname:\n return query.path[1:]\n else:\n raise ValueError", "def get_embed(url, params=None):\n if not url:\n return None\n \n embed_dict = []\n if type(url) == dict:\n \n # For those weird cases when og:video:type is declared but not og:video\n if not 'url' in url:\n return None\n \n #video_type = url.get('type')\n embed_dict = url\n url = url['url']\n \n url = url.strip()\n \n p = urlparse.urlparse(url)\n for embed_subclass in Embed.__subclasses__():\n if embed_subclass.check_url(parse_url=p):\n return embed_subclass(url, params=None)\n # if not was between subclass of EmbedResource\n if all(x in embed_dict for x in ['video', 'url']):\n \n # custom for new.livestream.com, clear iframe inside the url, and add w&h\n if 'iframe src=' in embed_dict['video']:\n embed_dict['video'] = EmbedFactory._livestream_clean(embed_dict['video'])\n \n embed_url = urlparse.urlparse(embed_dict['video'])\n for provider in WHITELIST_EMBED_PROVIDERS:\n if re.search(provider.get('name', None), embed_url.netloc):\n params = {\"embed_url\": embed_url}\n params['provider_name'] = embed_dict['site_name'] if 'site_name' in embed_dict else None\n params['thumbnail_url'] = embed_dict['image'] if 'image' in embed_dict else None\n \n if provider.get('height', False) and 'height' in embed_dict:\n params['height'] = embed_dict.get('height', None)\n \n return GenericEmbed(embed_dict['url'], params)\n \n # This should be a \"generic\" case, mostly comming from og:video\n # Chose to disable it as it's very difficult to make it work for any generic case\n # Check http://soc.li/SHxUtad\n #return generic_video_dict(url, video_type)\n return None\n return None", "def test_parse_youtube_one_video(self):\r\n youtube_str = '0.75:jNCf2gIqpeE'\r\n output = VideoDescriptor._parse_youtube(youtube_str)\r\n self.assertEqual(output, {'0.75': 'jNCf2gIqpeE',\r\n '1.00': '',\r\n '1.25': '',\r\n '1.50': ''})", "def test_parse_youtube_one_video(self):\r\n youtube_str = '0.75:jNCf2gIqpeE'\r\n output = VideoDescriptor._parse_youtube(youtube_str)\r\n self.assertEqual(output, {'0.75': 'jNCf2gIqpeE',\r\n '1.00': '',\r\n '1.25': '',\r\n '1.50': ''})", "async def youtube(self, ctx, *, query):\n url = f\"https://www.googleapis.com/youtube/v3/search?part=snippet&q={query}&type=video&maxResults=1&key={google_api_key}\"\n response = requests.get(url)\n try:\n await ctx.send(\n f\"https://www.youtube.com/watch?v={response.json()['items'][0]['id']['videoId']}\"\n )\n except IndexError:\n await ctx.send(\"**No results for given query found.**\")", "def get_video_id(self, obj):\n return obj.video.id", "def from_youtube(cls, video_data, event):\n self = cls(event)\n\n metadata = self.metadata\n\n metadata['title'] = self.__calculate_title(video_data)\n self.filename = self.__calculate_slug()\n metadata['speakers'] = ['TODO'] # Needs human intervention later\n # youtube_id = video_data['display_id']\n # metadata['thumbnail_url'] =\n # 'https://i.ytimg.com/vi/{}/maxresdefault.jpg'.format(youtube_id)\n metadata['thumbnail_url'] = video_data['thumbnail']\n 
metadata['videos'] = [{\n 'type': 'youtube',\n 'url': video_data['webpage_url']\n }]\n metadata['recorded'] = self.__calculate_date_recorded(\n video_data['upload_date'])\n\n # optional values\n metadata['copyright_text'] = video_data['license']\n metadata['duration'] = video_data['duration'] # In seconds\n metadata['language'] = video_data['formats'][0].get(\n 'language', event.language)\n if not metadata['language']:\n metadata['language'] = event.language\n metadata['related_urls'] = copy.deepcopy(event.related_urls)\n\n if event.minimal_download:\n metadata['speakers'] = []\n metadata['tags'] = event.tags\n metadata['description'] = ''\n else:\n metadata['tags'] = sorted(\n set(video_data['tags']).union(set(event.tags)))\n metadata['description'] = video_data['description']\n description_urls = list(\n set(\n re.findall(r'http[s]?://[^ \\\\\\n\\t()[\\]\"`´\\']+', video_data[\n 'description'])))\n for url in description_urls:\n metadata['related_urls'].append({'label': url, 'url': url})\n\n return self", "def media_content_type(self):\n return MEDIA_TYPE_TRACK", "def get_video_url(data):\n # type: (dict) -> Optional[str]\n resource = data.get(\"resources\", [{}])[0]\n url = resource.get(\"video_stream\") # try m3u8\n if not url: # try mp4\n files = resource.get(\"files\")[0]\n mp4 = get_mime_property(files, \"url\", \"video/mp4\")\n url = \"https:{}\".format(mp4) if mp4 and mp4.startswith(\"//\") else mp4\n if not url: # try x-video\n idx = get_mime_property(files, \"mediaObjectId\", \"application/x-video\")\n media = get_json(LOS_MEDIA_TEMPLATE.format(idx))\n derivative = media.get(\"mediaObject\").get(\"derivatives\")[0]\n url = \"https://{}/{}\".format(\n derivative.get(\"fqdn\"),\n derivative.get(\"derivativeMediaUrl\").replace(\"mp4:\", \"\"))\n return url", "def customize_video(course_data, block_data):\n try:\n block_data['youtube_id'] = course_data['metadata']['youtube_id_1_0']\n except KeyError:\n block_data['youtube_id'] = None\n try:\n block_data['start_time'] = course_data['metadata']['start_time']\n except KeyError:\n block_data['start_time'] = None\n try:\n block_data['end_time'] = course_data['metadata']['end_time']\n except KeyError:\n block_data['end_time'] = None", "async def video(ctx, message):\n \"\"\":param: ctx\"\"\"\n \"\"\":param: message\"\"\"\n \"\"\"return video url\"\"\"\n link_list = []\n print ('Searching YouTube for: %s' % message)\n url = \"https://www.youtube.com/results?search_query=\" + message\n response = urlopen(url)\n html = response.read()\n soup = BeautifulSoup(html, \"lxml\")\n for vid in soup.findAll(attrs={'class': 'yt-uix-tile-link'}):\n link_list.append('https://www.youtube.com' + vid['href'])\n if(len(link_list) >=1):\n random_num = random.randint(0, len(link_list) - 1)\n await bot.say(link_list[random_num])\n else:\n await bot.say(\"there is no contente for \"+message)", "def movie_media_type(name):\n return name.endswith(('.ogv', '.vob', '.mp4', '.wmv', '.mov', '.mpeg'))", "def get_or_create_video(self):\n # Make sure LTI verification have run successfully\n assert getattr(self, \"_is_verified\", False) or self.verify()\n\n # If the video already exist, retrieve it from database\n try:\n return Video.objects.get(\n lti_id=self.resource_link_id,\n playlist__lti_id=self.context_id,\n playlist__consumer_site__name=self.consumer_site_name,\n )\n except Video.DoesNotExist:\n # Only create the video if the request comes from an instructor\n if not self.is_instructor:\n return None\n\n # Creating the video...\n # - Get the consumer site (we 
know it exists because the passport verified)\n consumer_site = ConsumerSite.objects.get(name=self.consumer_site_name)\n\n # - Get the playlist if it exists or create it\n playlist, _ = Playlist.objects.get_or_create(\n lti_id=self.context_id,\n consumer_site=consumer_site,\n defaults={\"title\": self.context_title},\n )\n\n # Create the video\n return Video.objects.create(\n lti_id=self.resource_link_id,\n title=self.resource_link_title,\n playlist=playlist,\n )", "def media_type(self) -> str:\n return self._media_type", "def test_task_video_download(url_to_video: str, empty_video_resource: VideoResource):\n download_video(url_to_video, empty_video_resource.id)\n empty_video_resource.refresh_from_db()\n video_instance = empty_video_resource.videos.filter(primary=True).first()\n\n assert empty_video_resource.videos.all()\n assert video_instance.extension == 'mp4'\n assert video_instance.primary\n for item in video_instance.video.open():\n assert item", "def youtube(self, youtube):\n\n self._youtube = youtube", "def is_video(mine=None, file=None):\n if file:\n mine = get_file_mine(file)\n print(mine)\n\n if mine:\n return mine.find('video') != -1\n\n return False", "def add_video_to_playlist(youtube, args, privacy=\"public\"):\n video_id = args['video_id']\n playlist_id = args['playlist_id']\n \n print(video_id)\n #print(type(args))\n \n if playlist_id:\n return add_video_to_existing_playlist(youtube, playlist_id, video_id)\n else:\n lib.debug(\"Error adding video to playlist\")", "def get_yt_link_by_id(video_id):\n info = InnerTube().player(video_id)\n det = info.get(\"videoDetails\", None)\n title = det.get(\"title\", None) if det else None\n streaming_data = info.get(\"streamingData\", None)\n fmts = streaming_data.get(\"formats\", None) if streaming_data else None\n\n if fmts:\n links = {Quality[i[\"itag\"]]: i[\"url\"] for i in fmts if i.get(\"itag\", -1) in Quality and \"url\" in i}\n\n if links and title:\n return links, title.replace(\"+\", \" \")\n\n cause = None\n status = info.get(\"playabilityStatus\", None)\n if status:\n cause = f\"[{status.get('status', '')}] {status.get('reason', '')}\"\n\n log(f\"{__class__.__name__}: Getting link to video with id '{video_id}' filed! Cause: {cause}\")\n\n return None, cause", "def play_youtube_video_service(service, media_id):\n target_players = component.extract_from_service(service)\n\n if media_id:\n for player in target_players:\n player.play_youtube(media_id)", "def test_note_has_video_attachment_is_false_for_note_with_other_mime_type(fake_note_with_pdf_attachment):\n\n note_id_value: str = str(uuid.uuid4())\n a_note = Note.from_dict(note_id_value, fake_note_with_pdf_attachment)\n assert not a_note.has_video_attachment()\n assert a_note.is_document\n assert not (\"video\" in a_note.mime_type)\n assert a_note.object_type == \"msdyn_workorder\"", "def test_youtube_iframe():\n\n sample = \".. 
youtube:: YID\\n :height: 400\\n :width: 600\"\n html = get_html_from_rst(sample)\n assert_html_contains(\n html,\n \"iframe\",\n attributes={\n \"src\": (\n \"https://www.youtube-nocookie.com\"\n \"/embed/YID?rel=0&\"\n \"wmode=transparent\"\n ),\n \"height\": \"400\",\n \"width\": \"600\",\n \"frameborder\": \"0\",\n \"allowfullscreen\": \"\",\n \"allow\": \"encrypted-media\",\n },\n )", "def get_video_id(self):\n \n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n p = urlparse.urlparse(self.original_url)\n params = cgi.parse_qs(p.query)\n \n if p.path.endswith('/video'):\n # url type http://www.livestream.com/xprize/video?clipId=pla_1a25a2ba-9ca4-4c3b-b1b1-ebd7d79ef6d2\n if 'clipId' in params:\n return params['clipId'][0]\n if p.path.startswith('/embed'):\n # url type http://cdn.livestream.com/embed/xprize?layout=4&amp;clip=pla_1a25a2ba-9ca4-4c3b-b1b1-ebd7d79ef6d2&amp;width=560&amp;autoplay=false\n if 'clip' in params:\n return params['clip'][0]\n \n return ''", "def get_embed_url(self):\n if not self.get_video_id():\n return ''\n \n if not self.embed_url:\n self.embed_url = 'https://www.youtube.com/embed/%s?wmode=transparent' % self.get_video_id()\n \n return self.embed_url", "def getPlayer(self):\n \n youtube = self.isYouTube()\n player = self.isMediaPlayer()\n \n if youtube and player:\n if self.selection:\n return self.selection\n else:\n return 'external'\n elif youtube or player:\n if not self.isYouTube():\n return 'internal'\n if not self.isMediaPlayer():\n return 'external'\n else:\n return None", "def media_type(self):\n return self._media_type", "def get_video_by_id():\n sample_id = demisto.getArg('id')\n r = req('GET', SUB_API + 'samples/' + sample_id + '/video.webm')\n ec = {'ThreatGrid.Sample.Id': sample_id}\n demisto.results([\n {\n 'Type': entryTypes['note'],\n 'EntryContext': ec,\n 'HumanReadable': '### ThreatGrid Sample Run Video File -\\n'\n + 'Your sample run video file download request has been completed successfully for '\n + sample_id,\n 'Contents': ec,\n 'ContentsFormat': formats['json']\n },\n fileResult(sample_id + '.webm', r.content)\n ])", "def fetch_youtube_url(search_term, dev_key=None):\r\n in_cache, video_id = check_if_in_cache(search_term)\r\n if in_cache:\r\n return YOUTUBE_VIDEO_URL + video_id\r\n if not dev_key:\r\n YOUTUBE_SEARCH_BASE = \"https://www.youtube.com/results?search_query=\"\r\n try:\r\n response = requests.get(YOUTUBE_SEARCH_BASE + search_term).content\r\n html_response = html.fromstring(response)\r\n video = html_response.xpath(\"//a[contains(@class, 'yt-uix-tile-link')]/@href\")\r\n video_id = re.search(\"((\\?v=)[a-zA-Z0-9_-]{4,15})\", video[0]).group(0)[3:]\r\n log.debug(f\"Found video id {video_id} for search term {search_term}\")\r\n _ = save_to_cache(search_term=search_term, video_id=video_id)\r\n return YOUTUBE_VIDEO_URL + video_id\r\n except AttributeError as e:\r\n log.warning(f\"Could not find scrape details for {search_term}\")\r\n capture_exception(e)\r\n return None\r\n except IndexError as e:\r\n log.warning(f\"Could not perform scrape search for {search_term}, got a different HTML\")\r\n capture_exception(e)\r\n return None\r\n else:\r\n youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,\r\n developerKey=dev_key,\r\n cache_discovery=False)\r\n try:\r\n in_cache, video_id = check_if_in_cache(search_term)\r\n\r\n if not in_cache:\r\n search_response = youtube.search().list(q=search_term,\r\n part='id, snippet').execute()\r\n for v in search_response['items']:\r\n if 
v['id']['kind'] == VIDEO:\r\n video_id = v['id']['videoId']\r\n log.debug(f\"Adding Video id {video_id}\")\r\n _ = save_to_cache(search_term=search_term, video_id=video_id) \r\n return YOUTUBE_VIDEO_URL + video_id\r\n except HttpError as err:\r\n err_details = loads(err.content.decode('utf-8')).get('error').get('errors')\r\n secho(\"Couldn't complete search due to following errors: \", fg='red')\r\n for e in err_details:\r\n error_reason = e.get('reason')\r\n error_domain = e.get('domain')\r\n error_message = e.get('message')\r\n\r\n if error_reason == 'quotaExceeded' or error_reason == 'dailyLimitExceeded':\r\n secho(f\"\\tYou're over daily allowed quota. Unfortunately, YouTube restricts API keys to a max of 10,000 requests per day which translates to a maximum of 100 searches.\", fg='red')\r\n secho(f\"\\tThe quota will be reset at midnight Pacific Time (PT).\" ,fg='red')\r\n secho(f\"\\tYou can request for Quota increase from https://console.developers.google.com/apis/api/youtube.googleapis.com/quotas.\", fg='red')\r\n else:\r\n secho(f\"\\t Search failed due to {error_domain}:{error_reason}, message: {error_message}\")\r\n return None", "def youtube(code,width=400,height=250):\n\n return XML(\"\"\"<object width=\"%(width)s\" height=\"%(height)s\"><param name=\"movie\" value=\"http://www.youtube.com/v/%(code)s&hl=en_US&fs=1&\"></param><param name=\"allowFullScreen\" value=\"true\"></param><param name=\"allowscriptaccess\" value=\"always\"></param><embed src=\"http://www.youtube.com/v/%(code)s&hl=en_US&fs=1&\" type=\"application/x-shockwave-flash\" allowscriptaccess=\"always\" allowfullscreen=\"true\" width=\"%(width)s\" height=\"%(height)s\"></embed></object>\"\"\" % dict(code=code, width=width, height=height))", "def is_video_wanted(video: AnimeThemeVideo) -> bool:\n for k in ('nc','subbed','lyrics','uncen'):\n v = OPTIONS['filter'][k]\n if v is not None and video[k] ^ v:\n return False\n if video['resolution'] < OPTIONS['filter']['resolution']:\n return False\n if OPTIONS['filter']['source'] is not None and video['source'] != OPTIONS['filter']['source']:\n return False\n if OPTIONS['filter']['overlap'] is not None and video['overlap'] not in OPTIONS['filter']['overlap']: # uses lists\n return False\n \n return True", "def get_youtube_trailer_url(self) -> str:\n if self._trailer is None:\n self._find_trailer_url()\n return self._trailer", "def media_type(self, media_type: str):\n if media_type is None:\n raise ValueError(\"Invalid value for `media_type`, must not be `None`\")\n\n self._media_type = media_type", "def get_video_tag(self, node):\r\n return Video()", "def test_video_constructor(self):\r\n context = self.item_descriptor.render('student_view').content\r\n\r\n sources = {\r\n 'main': u'example.mp4',\r\n u'mp4': u'example.mp4',\r\n u'webm': u'example.webm',\r\n }\r\n\r\n expected_context = {\r\n 'ajax_url': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',\r\n 'autoplay': settings.FEATURES.get('AUTOPLAY_VIDEOS', False),\r\n 'data_dir': getattr(self, 'data_dir', None),\r\n 'display_name': u'A Name',\r\n 'end': 3610.0,\r\n 'id': self.item_descriptor.location.html_id(),\r\n 'show_captions': 'true',\r\n 'handout': None,\r\n 'sources': sources,\r\n 'speed': 'null',\r\n 'general_speed': 1.0,\r\n 'start': 3603.0,\r\n 'saved_video_position': 0.0,\r\n 'sub': u'a_sub_file.srt.sjson',\r\n 'track': None,\r\n 'youtube_streams': create_youtube_string(self.item_descriptor),\r\n 'yt_test_timeout': 1500,\r\n 'yt_api_url': 'www.youtube.com/iframe_api',\r\n 'yt_test_url': 
'gdata.youtube.com/feeds/api/videos/',\r\n 'transcript_download_format': 'srt',\r\n 'transcript_download_formats_list': [{'display_name': 'SubRip (.srt) file', 'value': 'srt'}, {'display_name': 'Text (.txt) file', 'value': 'txt'}],\r\n 'transcript_language': u'en',\r\n 'transcript_languages': json.dumps(OrderedDict({\"en\": \"English\", \"uk\": u\"Українська\"})),\r\n 'transcript_translation_url': self.item_descriptor.xmodule_runtime.handler_url(\r\n self.item_descriptor, 'transcript', 'translation'\r\n ).rstrip('/?'),\r\n 'transcript_available_translations_url': self.item_descriptor.xmodule_runtime.handler_url(\r\n self.item_descriptor, 'transcript', 'available_translations'\r\n ).rstrip('/?'),\r\n }\r\n\r\n self.assertEqual(\r\n context,\r\n self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context),\r\n )", "def test_api_video_instructor_initiate_live_invalid_type(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n playlist__title=\"foo bar\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # initiate a live video,\n # It should generate a key file with the Unix timestamp of the present time\n now = datetime(2018, 8, 8, tzinfo=pytz.utc)\n live_info = {\n \"medialive\": {\n \"input\": {\n \"id\": \"medialive_input_1\",\n \"endpoints\": [\"https://live_endpoint1\", \"https://live_endpoint2\"],\n },\n \"channel\": {\"id\": \"medialive_channel_1\"},\n },\n \"mediapackage\": {\n \"id\": \"mediapackage_channel_1\",\n \"endpoints\": {\n \"hls\": {\n \"id\": \"endpoint1\",\n \"url\": \"https://channel_endpoint1/live.m3u8\",\n },\n },\n },\n }\n with mock.patch.object(timezone, \"now\", return_value=now), mock.patch.object(\n api, \"create_live_stream\", return_value=live_info\n ):\n response = self.client.post(\n f\"/api/videos/{video.id}/initiate-live/\",\n {\"type\": \"invalid\"},\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)\n content = json.loads(response.content)\n\n self.assertEqual(content, {\"type\": ['\"invalid\" is not a valid choice.']})", "def media_content_type(self):\n return MEDIA_TYPE_MUSIC", "def media_content_type(self):\n return MEDIA_TYPE_MUSIC", "def media_content_type(self):\n return MEDIA_TYPE_MUSIC", "def media_content_type(self):\n return MEDIA_TYPE_MUSIC", "async def download_video(v_url):\n reply = await v_url.get_reply_message()\n if v_url.pattern_match.group(2) != \"\":\n url = v_url.pattern_match.group(2)\n elif reply is not None:\n url = reply.message\n url = re.findall(r\"\\bhttps?://.*\\.\\S+\", reply.message)[0]\n else:\n return\n type = (\n v_url.pattern_match.group(1).lower()\n if v_url.pattern_match.group(1) is not None\n else \"a\"\n )\n await v_url.edit(\"`Preparing to download...`\")\n out_folder = Config.TMP_DOWNLOAD_DIRECTORY + \"youtubedl/\"\n Config.TMP_DOWNLOAD_DIRECTORY + \"/thumb_image.jpg\"\n if not os.path.isdir(out_folder):\n os.makedirs(out_folder)\n if type == \"a\":\n opts = {\n \"format\": \"bestaudio\",\n \"addmetadata\": True,\n \"noplaylist\": False,\n \"key\": \"FFmpegMetadata\",\n \"writethumbnail\": True,\n \"embedthumbnail\": True,\n \"prefer_ffmpeg\": True,\n \"geo_bypass\": True,\n \"nocheckcertificate\": True,\n \"postprocessors\": [\n {\n \"key\": \"FFmpegExtractAudio\",\n 
\"preferredcodec\": \"mp3\",\n \"preferredquality\": \"320\",\n }\n ],\n \"outtmpl\": out_folder + \"%(title)s.%(ext)s\",\n \"quiet\": True,\n \"logtostderr\": False,\n }\n video = False\n song = True\n\n elif type == \"v\":\n opts = {\n \"format\": \"best\",\n \"addmetadata\": True,\n \"noplaylist\": False,\n \"getthumbnail\": True,\n \"embedthumbnail\": True,\n \"xattrs\": True,\n \"writethumbnail\": True,\n \"key\": \"FFmpegMetadata\",\n \"prefer_ffmpeg\": True,\n \"geo_bypass\": True,\n \"nocheckcertificate\": True,\n \"postprocessors\": [\n {\"key\": \"FFmpegVideoConvertor\", \"preferedformat\": \"mp4\"},\n ],\n \"outtmpl\": out_folder + \"%(title)s.%(ext)s\",\n \"logtostderr\": False,\n \"quiet\": True,\n }\n song = False\n video = True\n\n try:\n await v_url.edit(\"`Fetching playlist data, please wait..`\")\n with YoutubeDL(opts) as ytdl:\n ytdl.extract_info(url)\n # print(ytdl_data['thumbnail'])\n filename = sorted(get_lst_of_files(out_folder, []))\n except DownloadError as DE:\n await v_url.edit(f\"`{str(DE)}`\")\n return\n except ContentTooShortError:\n await v_url.edit(\"`The download content was too short.`\")\n return\n except GeoRestrictedError:\n await v_url.edit(\n \"`Video is not available from your geographic location due to geographic restrictions imposed by a website.`\"\n )\n return\n except MaxDownloadsReached:\n await v_url.edit(\"`Max-downloads limit has been reached.`\")\n return\n except PostProcessingError:\n await v_url.edit(\"`There was an error during post processing.`\")\n return\n except UnavailableVideoError:\n await v_url.edit(\"`Media is not available in the requested format.`\")\n return\n except XAttrMetadataError as XAME:\n await v_url.edit(f\"`{XAME.code}: {XAME.msg}\\n{XAME.reason}`\")\n return\n except ExtractorError:\n await v_url.edit(\"`There was an error during info extraction.`\")\n return\n except Exception as e:\n await v_url.edit(f\"{str(type(e)): {str(e)}}\")\n return\n c_time = time.time()\n await v_url.edit(\"`YouTube Playlist Downloading Processing Now.\\nPlease Wait!`\")\n if song:\n for single_file in filename:\n if os.path.exists(single_file):\n caption_rts = os.path.basename(single_file)\n force_document = True\n supports_streaming = False\n document_attributes = []\n if single_file.endswith((\".mp4\", \".mp3\", \".flac\", \".webm\")):\n metadata = extractMetadata(createParser(single_file))\n duration = 0\n width = 0\n height = 180\n if metadata.has(\"duration\"):\n duration = metadata.get(\"duration\").seconds\n document_attributes = [\n DocumentAttributeVideo(\n duration=duration,\n w=width,\n h=height,\n round_message=False,\n supports_streaming=True,\n )\n ]\n try:\n ytdl_data_name_audio = os.path.basename(single_file)\n thumb = (\n out_folder\n + ytdl_data_name_audio[: (len(ytdl_data_name_audio) - 4)]\n + \".jpg\"\n )\n print(ytdl_data_name_audio)\n file_path = single_file\n song_size = file_size(file_path)\n await v_url.client.send_file(\n v_url.chat_id,\n single_file,\n caption=f\"`{ytdl_data_name_audio}`\"\n + \"\\n\"\n + f\"Size👉 {song_size}\",\n force_document=force_document,\n supports_streaming=supports_streaming,\n allow_cache=False,\n thumb=thumb,\n reply_to=v_url.message.id,\n attributes=document_attributes,\n progress_callback=lambda d, t: asyncio.get_event_loop().create_task(\n progress(\n d,\n t,\n v_url,\n c_time,\n \"Uploading..\",\n f\"{ytdl_data_name_audio}\",\n )\n ),\n )\n # os.remove(thumb)\n except Exception as e:\n await v_url.client.send_message(\n v_url.chat_id,\n \"{} caused `{}`\".format(caption_rts, 
str(e)),\n )\n continue\n os.remove(single_file)\n await asyncio.sleep(DELETE_TIMEOUT)\n # await v_url.delete()\n shutil.rmtree(out_folder)\n if video:\n for single_file in filename:\n if os.path.exists(single_file):\n caption_rts = os.path.basename(single_file)\n force_document = False\n supports_streaming = True\n document_attributes = []\n if single_file.endswith((\".mp4\", \".mp3\", \".flac\", \".webm\")):\n metadata = extractMetadata(createParser(single_file))\n duration = 0\n width = 0\n height = 0\n if metadata.has(\"duration\"):\n duration = metadata.get(\"duration\").seconds\n document_attributes = [\n DocumentAttributeVideo(\n duration=duration,\n w=width,\n h=height,\n round_message=False,\n supports_streaming=True,\n )\n ]\n # print(ytdl_data)\n # for file in os.listdir(\"./DOWNLOADS/youtubedl/\"):\n # if file.endswith(\".jpg\"):\n # thumb = \"./DOWNLOADS/youtubedl/\" + file\n # print(os.path.join(\"./DOWNLOADS/youtubedl/\", file))\n # image_link = ytdl_data['thumbnail']\n # downloaded_image = wget.download(image_link,out_folder)\n # thumb = ytdl_data_name_video + \".jpg\"\n file_path = single_file\n video_size = file_size(file_path)\n try:\n ytdl_data_name_video = os.path.basename(single_file)\n thumb = (\n out_folder\n + ytdl_data_name_video[: (len(ytdl_data_name_video) - 4)]\n + \".jpg\"\n )\n await v_url.client.send_file(\n v_url.chat_id,\n single_file,\n caption=f\"`{ytdl_data_name_video}`\"\n + \"\\n\"\n + f\"Size👉 {video_size}\",\n force_document=force_document,\n supports_streaming=supports_streaming,\n thumb=thumb,\n allow_cache=False,\n reply_to=v_url.message.id,\n attributes=document_attributes,\n progress_callback=lambda d, t: asyncio.get_event_loop().create_task(\n progress(\n d,\n t,\n v_url,\n c_time,\n \"Uploading..\",\n f\"{ytdl_data_name_video}\",\n )\n ),\n )\n # os.remove(thumb)\n except Exception as e:\n await v_url.client.send_message(\n v_url.chat_id,\n \"{} caused `{}`\".format(caption_rts, str(e)),\n )\n continue\n os.remove(single_file)\n await asyncio.sleep(DELETE_TIMEOUT)\n # await v_url.delete()\n shutil.rmtree(out_folder)", "def test_parse_youtube_invalid(self):\r\n\r\n # invalid id\r\n youtube_str = 'thisisaninvalidid'\r\n output = VideoDescriptor._parse_youtube(youtube_str)\r\n self.assertEqual(output, {'0.75': '',\r\n '1.00': '',\r\n '1.25': '',\r\n '1.50': ''})\r\n # another invalid id\r\n youtube_str = ',::,:,,'\r\n output = VideoDescriptor._parse_youtube(youtube_str)\r\n self.assertEqual(output, {'0.75': '',\r\n '1.00': '',\r\n '1.25': '',\r\n '1.50': ''})\r\n\r\n # and another one, partially invalid\r\n youtube_str = '0.75_BAD!!!,1.0:AXdE34_U,1.25:KLHF9K_Y,1.5:VO3SxfeD,'\r\n output = VideoDescriptor._parse_youtube(youtube_str)\r\n self.assertEqual(output, {'0.75': '',\r\n '1.00': 'AXdE34_U',\r\n '1.25': 'KLHF9K_Y',\r\n '1.50': 'VO3SxfeD'})", "def get_video_id(self):\n \n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n #logger.debug('DAILYMOTION VIDEO FOUND %s' % url)\n \n p = urlparse.urlparse(self.original_url)\n path = p.path\n if path.endswith('/'):\n path = path[:-1]\n path_list = path[1:].split('/')\n \n if len(path_list) == 3 and (p.path.startswith('/embed/video/') or p.path.startswith('/swf/video/')):\n # http://www.dailymotion.com/embed/video/xmp7zw\n return re.sub('_.+', '', path_list[2])\n elif len(path_list) == 2 and (p.path.startswith('/video/') or p.path.startswith('/swf/')):\n # http://www.dailymotion.com/video/xmp7zw_whatever\n # http://www.dailymotion.com/swf/xmp7zw\n return 
re.sub('_.+', '', path_list[1])\n \n return ''", "def valid_media_type(media_type):\n return media_type in ACCEPTED_MEDIA_TYPES", "def test_api_video_create_by_playlist_admin(self):\n user = factories.UserFactory()\n playlist = factories.PlaylistFactory()\n factories.PlaylistAccessFactory(\n role=models.ADMINISTRATOR, playlist=playlist, user=user\n )\n\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(user.id)\n jwt_token.payload[\"user\"] = {\n \"id\": str(user.id),\n \"username\": user.username,\n }\n\n self.assertEqual(models.Video.objects.count(), 0)\n\n response = self.client.post(\n \"/api/videos/\",\n {\n \"lti_id\": \"video_one\",\n \"playlist\": str(playlist.id),\n \"title\": \"Some video\",\n },\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(models.Video.objects.count(), 1)\n self.assertEqual(response.status_code, 201)\n self.assertEqual(\n response.json(),\n {\n \"active_stamp\": None,\n \"description\": \"\",\n \"has_transcript\": False,\n \"id\": str(models.Video.objects.get().id),\n \"is_ready_to_show\": False,\n \"live_info\": {},\n \"live_state\": None,\n \"live_type\": None,\n \"playlist\": {\n \"id\": str(playlist.id),\n \"lti_id\": playlist.lti_id,\n \"title\": playlist.title,\n },\n \"should_use_subtitle_as_transcript\": False,\n \"show_download\": True,\n \"thumbnail\": None,\n \"timed_text_tracks\": [],\n \"title\": \"Some video\",\n \"upload_state\": \"pending\",\n \"urls\": None,\n \"xmpp\": None,\n },\n )", "def supportedType(request, video_types):\n return request.FILES['file'].content_type in video_types.keys()", "def get_video(lesson_id, video_id):\n url = '{0}?cat={1}&video={2}'.format(BASE_URL, lesson_id, video_id)\n page = requests.get(url, verify=False)\n soup = BeautifulSoup(page.content)\n return soup.find('iframe')['src'].split('/')[-1]", "def upload_video(self, video_file):\r\n part = \"snippet,status\"\r\n metadata = self.get_metadata(video_file)\r\n body = {\r\n \"snippet\": {\r\n \"title\": metadata['title'],\r\n \"description\": metadata['description'],\r\n \"tags\": metadata['categoryId'],\r\n \"categoryId\": metadata['categoryId']\r\n },\r\n \"status\": {\r\n \"privacyStatus\": \"public\",\r\n \"license\": \"youtube\", # temporary, see gh#414\r\n \"embeddable\": True,\r\n \"publicStatsViewable\": True\r\n }\r\n }\r\n # This is to fix a bug, the API thinks our .ogg files are audio/ogg\r\n mimetype = \"video/{}\".format(video_file.split(\".\")[-1])\r\n media_body = MediaFileUpload(video_file, chunksize=-1, resumable=True, mimetype=mimetype)\r\n insert_request = self.service.videos().insert(part=part, body=body, media_body=media_body)\r\n response = None\r\n error = None\r\n retry = 0\r\n sleep_seconds = 5.0\r\n while response is None:\r\n try:\r\n log.info(\"Uploading %s\" % video_file)\r\n (status, response) = insert_request.next_chunk()\r\n if 'id' in response:\r\n return (Response.SUCCESS, response)\r\n else:\r\n return (Response.UNEXPECTED_FAILURE, response)\r\n except HttpError as e:\r\n if e.resp.status in self.RETRIABLE_STATUS_CODES:\r\n error = \"A retriable HTTP error {} occurred:\\n{}\".format(e.resp.status, e.content)\r\n else:\r\n return (Response.UNRETRIABLE_ERROR, {\"status\": e.resp.status, \"content\": e.content})\r\n except self.RETRIABLE_EXCEPTIONS as e:\r\n error = \"A retriable error occurred: {}\".format(e)\r\n except client.AccessTokenRefreshError:\r\n return (Response.ACCESS_TOKEN_ERROR, None)\r\n if error is not None:\r\n log.error(error)\r\n retry += 1\r\n if retry > 
self.MAX_RETRIES:\r\n return (Response.MAX_RETRIES_REACHED, None)\r\n log.info(\"Sleeping %s seconds and then retrying...\" % sleep_seconds)\r\n time.sleep(sleep_seconds)", "def test_clean_no_image_or_youtube_id():\n resource = models.MediaResource()\n\n with pytest.raises(ValidationError):\n resource.clean()", "def get_video_id(lookup_value, lookup_type='url'):\n if lookup_type == 'url':\n video_id = lookup_value.split('videos/')[1]\n elif lookup_type == 'content_id' or lookup_type == 'id':\n video_json = core.get_data('contents', lookup_value, return_json=True)\n video_id = video_json['id']\n else:\n errors.handlers.bad_lookup_type(lookup_type, ('url', 'content_id'))\n return video_id", "def test_youtube_videos(dates):\n test = pycmc.charts.youtube.videos(dates[\"start\"])\n assert isinstance(test, list)\n assert len(test) > 90\n assert test[0][\"name\"] != \"\"\n assert test[0][\"id\"] != \"\"", "def _render_no_tracking(self, video_id):\n you_tube_url = (\n 'https://www.youtube.com/embed/%s'\n '?feature=player_embedded&amp;rel=0') % video_id\n iframe = cElementTree.XML(\"\"\"\n<div class=\"gcb-video-container\">\n <iframe class=\"youtube-player\" title=\"YouTube Video Player\"\n type=\"text/html\" frameborder=\"0\" allowfullscreen=\"allowfullscreen\">\n </iframe>\n</div>\"\"\")\n iframe[0].set('src', you_tube_url)\n return iframe", "def tekstowo_youtube_url(source):\n reg = re.compile(r\"var videoID = \\\"(.*)\\\";\")\n try:\n video_id = reg.search(source).group(1)\n except Exception:\n raise Exception(ERROR_STR + '[crawler] cannot find videoID')\n if not video_id:\n raise Exception(ERROR_STR + '[crawler] empty videoID')\n\n return \"https://www.youtube.com/watch?v=\" + video_id" ]
[ "0.6823963", "0.6766844", "0.6746436", "0.6112856", "0.60233295", "0.59815586", "0.5897715", "0.58677566", "0.5842102", "0.58315057", "0.580741", "0.580741", "0.580741", "0.580741", "0.58059746", "0.579559", "0.579133", "0.5763148", "0.57312804", "0.57054985", "0.56609565", "0.56567675", "0.56507206", "0.5647443", "0.56471074", "0.56462425", "0.5623117", "0.56217027", "0.5602246", "0.5602242", "0.5597771", "0.5597085", "0.55888903", "0.5577885", "0.5527462", "0.5513054", "0.5508624", "0.5505584", "0.55020136", "0.55020136", "0.5495267", "0.5483397", "0.5475606", "0.54734254", "0.54699826", "0.54625034", "0.54569805", "0.5445926", "0.5433693", "0.542133", "0.5418447", "0.5418447", "0.541802", "0.5375089", "0.53683037", "0.53557956", "0.53441143", "0.53422046", "0.53315043", "0.53284717", "0.53265923", "0.5319717", "0.5319574", "0.53156126", "0.5313607", "0.5308127", "0.5307358", "0.53021103", "0.53017855", "0.5291485", "0.5290883", "0.52793944", "0.52750653", "0.5265065", "0.52631366", "0.5248785", "0.524344", "0.5239339", "0.5234531", "0.52246416", "0.52221113", "0.5220174", "0.5219979", "0.5215938", "0.5215938", "0.5215938", "0.5215938", "0.52000225", "0.5198459", "0.5170193", "0.5152317", "0.51459646", "0.51363057", "0.5133774", "0.5126837", "0.5099418", "0.5092531", "0.507698", "0.50721395", "0.50720835" ]
0.78130394
0
A facet dict should produce correct choice labels
def test_choices_from_facets(self): fake_facets = { "doctype": {"foo": 1, "bar": 2, "baz": 3}, "has_transcription": {"true": 3, "false": 3}, } form = DocumentSearchForm() # call the method to configure choices based on facets form.set_choices_from_facets(fake_facets) # test doctype facets (FacetChoiceField) for choice in form.fields["doctype"].widget.choices: # choice is index id, label choice_label = choice[1] assert isinstance(choice_label, str) assert "<span>" in choice_label # test has_transcription facet (BooleanFacetField) bool_label = form.fields["has_transcription"].label assert isinstance(bool_label, str) assert "3</span>" in bool_label
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def populate_from_facets(self, facet_dict):\n # generate the list of choice from the facets\n\n self.choices = (\n (\n val,\n mark_safe(f'<span>{label}</span><span class=\"count\">{count:,}</span>'),\n )\n for val, (label, count) in facet_dict.items()\n )\n # pass the counts to the widget so it can be set as a data attribute\n self.widget.facet_counts = facet_dict", "def populate_from_facets(self, facet_dict):\n # each value of facet_counts is a tuple of label and count; boolean facet label is always\n # just \"true\" or \"false\"\n (label, count) = facet_dict.get(\"true\", (\"true\", 0))\n # use self.label for the actual label instead, so we use the field name and not true/false\n self.label = mark_safe(\n f'<span class=\"label\">{self.label}</span><span class=\"count\">{count:,}</span>'\n )\n\n # pass the counts to the widget so it can be set as a data attribute\n self.widget.facet_counts = facet_dict", "def set_choices_from_facets(self, facets):\n # borrowed from ppa-django;\n # populate facet field choices from current facets\n for key, facet_dict in facets.items():\n # restructure dict to set values of each key to tuples of (label, count)\n if key == \"type\":\n # for doctype, label should be translated, so use doctype object\n facet_dict = {\n label: (\n DocumentType.objects_by_label.get(label, _(\"Unknown type\")),\n count,\n )\n for (label, count) in facet_dict.items()\n }\n else:\n # for other formfields, label == facet name\n facet_dict = {\n label: (label, count) for (label, count) in facet_dict.items()\n }\n # use field from facet fields map or else field name as is\n formfield = self.solr_facet_fields.get(key, key)\n # for each facet, set the corresponding choice field\n if formfield in self.fields:\n self.fields[formfield].populate_from_facets(facet_dict)", "def test_create_facet_dictionary(self):\n pass", "def test_get_facet_dictionary(self):\n pass", "def test_list_facet_dictionaries(self):\n pass", "def test_update_facet_dictionary(self):\n pass", "def test_get_facet_dictionary_content(self):\n pass", "def facets(self, *args, **kwargs) -> Any:\n pass", "def test_update_facet_dictionary_content(self):\n pass", "def convert_facets(result, output, args):\n aggs = result.get('aggregations', {})\n output['facets'] = {}\n for facet in args.getlist('facet'):\n value = aggs.get(facet, {})\n data = {\n 'values': [convert_bucket(facet, b)\n for b in value.get('buckets', [])]\n }\n output['facets'][facet] = data\n return output", "def process(self, facet):\n pass", "def test_remove_facet_dictionary(self):\n pass", "def test_get_prior_string_dict(self):\n categories = {\"asdfa\": 0.1, 2: 0.2, 3: 0.3, \"lalala\": 0.4}\n dim = Categorical(\n \"yolo\", categories, shape=2, default_value=[\"asdfa\", \"lalala\"]\n )\n assert dim.get_prior_string() == (\n \"choices({'asdfa': 0.10, 2: 0.20, 3: 0.30, 'lalala': 0.40}, \"\n \"shape=2, default_value=['asdfa', 'lalala'])\"\n )", "def test_label_choices(self):\n test_classes = (\n (0, 'No'),\n (1, 'Yes')\n )\n\n form = SingleLabelClassifierForm(classes=test_classes)\n self.assertEqual(tuple(form.fields['label'].choices), test_classes)\n\n form = MultiLabelClassifierForm(classes=test_classes)\n self.assertEqual(tuple(form.fields['label'].choices), test_classes)", "def get_labels(self):\n if self.option == \"term\":\n return ['platform characteristics', 'atmospheric winds', 'radio wave','weather events', 'geomagnetism', 'atmospheric electricity','microwave', 'atmospheric temperature', 'atmospheric water vapor','atmospheric pressure', 'aerosols', 
'atmospheric radiation','atmospheric chemistry', 'precipitation', 'sensor characteristics','radar', 'infrared wavelengths', 'visible wavelengths','weather/climate advisories', 'clouds', 'lidar', 'ocean optics','ultraviolet wavelengths', 'cryospheric indicators','land use/land cover', 'topography', 'surface thermal properties','spectral/engineering', 'soils', 'snow/ice', 'geothermal dynamics','natural hazards', 'surface water', 'vegetation','land surface/agriculture indicators','gravity/gravitational field', 'marine advisories', 'altitude','water quality/water chemistry', 'ocean temperature','ocean winds', 'atmospheric/ocean indicators', 'coastal processes','erosion/sedimentation', 'marine sediments', 'ocean chemistry','salinity/density', 'ocean color', 'aquatic ecosystems','vegetation2', 'landscape', 'cloud properties','surface radiative properties', 'geodetics','agricultural plant science', 'forest science','ecological dynamics', 'environmental impacts', 'sustainability','boundaries', 'ecosystems', 'air quality', 'population','infrastructure', 'environmental governance/management','public health', 'economic resources', 'socioeconomics','environmental vulnerability index (evi)', 'human settlements','agricultural chemicals', 'animal science','habitat conversion/fragmentation', 'animals/vertebrates','earth gases/liquids', 'rocks/minerals/crystals','social behavior', 'ground water', 'frozen ground','terrestrial hydrosphere indicators', 'ocean heat budget','biospheric indicators', 'animal commodities', 'fungi', 'plants','carbon flux', 'geomorphic landforms/processes','paleoclimate indicators', 'ocean circulation', 'sea ice','geochemistry', 'visualization/image processing','subsetting/supersetting', 'transformation/conversion','ocean pressure', 'glaciers/ice sheets', 'protists','solar activity', 'sun-earth interactions','sea surface topography', 'solar energetic particle properties','solar energetic particle flux','ionosphere/magnetosphere dynamics']\n elif self.option == \"mostdepth\":\n return ['flight data logs', 'turbulence', 'radio wave flux', 'lightning', 'magnetic field', 'atmospheric conductivity', 'electric field', 'data synchronization time', 'brightness temperature', 'vertical profiles', 'water vapor profiles', 'air temperature', 'upper level winds', 'atmospheric pressure measurements', 'upper air temperature', 'humidity', 'dew point temperature', 'aerosol particle properties', 'emissivity', 'trace gases/trace species', 'liquid precipitation', 'cloud liquid water/ice', 'microwave radiance', 'sensor counts', 'total pressure', 'airspeed/ground speed', 'total temperature', 'static pressure', 'wind speed', 'wind direction', 'radar reflectivity', 'doppler velocity', 'infrared imagery', 'visible imagery', 'water vapor', 'vertical wind velocity/speed', 'aerosol backscatter', 'weather forecast', 'tropical cyclones', 'visible radiance', 'infrared radiance', 'total precipitable water', 'boundary layer temperature', 'atmospheric temperature indices', 'cloud height', 'flight level winds', 'cloud droplet distribution', 'cloud droplet concentration/size', 'cloud condensation nuclei', 'cloud microphysics', 'hydrometeors', 'ozone', 'wind profiles', 'cloud base temperature', 'cloud base height', 'liquid water equivalent', 'solar radiation', 'planetary boundary layer height', 'surface winds', 'precipitation amount', 'precipitation rate', 'surface pressure', 'rain', 'cloud optical depth/thickness', 'aerosol extinction', 'aerosol optical depth/thickness', 'cirrus cloud systems', 'lidar depolarization 
ratio', 'radar backscatter', 'radar cross-section', 'return power', 'mean radial velocity', 'radiance', 'air quality', 'climate advisories', 'atmospheric emitted radiation', 'optical depth/thickness', 'surface temperature', 'ultraviolet flux', 'spectrum width', 'microwave imagery', 'lidar backscatter', 'relative humidity', 'u/v wind components', 'wind speed/wind direction', 'radar imagery', 'snow depth', 'land use/land cover classification', 'digital elevation/terrain model (dem)', 'snow', 'droplet size', 'droplet concentration/size', 'drizzle', 'precipitation anomalies', 'snow water equivalent', 'solid precipitation', 'total surface precipitation rate', 'particle size distribution', 'skin temperature', 'attitude characteristics', 'land surface temperature', 'hail', 'reflectance', 'soil moisture/water content', 'soil temperature', 'soil bulk density', 'surface roughness', 'present weather', 'snow density', 'ambient temperature', 'aerosol forward scatter', 'floods', 'snow cover', 'sigma naught', 'precipitable water', 'stage height', 'rivers/streams', 'shortwave radiation', 'photosynthetically active radiation', 'longwave radiation', 'net radiation', 'hourly precipitation amount', '24 hour precipitation amount', 'soil moisture', 'satellite orbits/revolution', 'sea surface temperature', 'heat flux', 'latent heat flux', 'cloud fraction', '3 and 6 hour precipitation amount', 'geopotential height', 'particulate matter', 'particle images', 'water vapor indices', 'horizontal wind velocity/speed', 'electrical conductivity', 'dissolved carbon dioxide', 'hurricanes', 'tropical cyclone track', 'convective clouds/systems (observed/analyzed)', 'cloud top height', 'viewing geometry', 'temperature profiles', 'vertical wind shear', 'wind shear', 'carbon monoxide', 'sea level pressure', 'water vapor tendency', 'potential temperature', 'angstrom exponent', 'ultraviolet radiation', 'solar irradiance', 'scattering', 'absorption', 'water vapor mixing ratio profiles', 'sea surface temperature indices', 'extreme eastern tropical pacific sst', 'sedimentation', 'erosion', 'sediment transport', 'sediments', 'tropopause', 'ocean chemistry', 'ocean optics', 'ocean temperature', 'salinity/density', 'pigments', 'ocean color', 'attenuation/transmission', 'inorganic carbon', 'organic carbon', 'photosynthetically available radiation', 'chlorophyll', 'optical depth', 'fluorescence', 'vegetation index', 'gelbstoff', 'phytoplankton', 'vegetation index2', 'cloud precipitable water', 'landscape ecology', 'ultraviolet radiance', 'cloud ceiling', 'aerosol radiance', 'carbonaceous aerosols', 'dust/ash/smoke', 'nitrate particles', 'organic particles', 'sulfate particles', 'radiative flux', 'transmittance', 'atmospheric stability', 'cloud asymmetry', 'cloud frequency', 'cloud top pressure', 'cloud top temperature', 'cloud vertical distribution', 'cloud emissivity', 'cloud radiative forcing', 'cloud reflectance', 'rain storms', 'reflected infrared', 'thermal infrared', 'incoming solar radiation', 'clouds', 'cloud properties', 'cloud types', 'orbital characteristics', 'sensor characteristics', 'maximum/minimum temperature', 'condensation', 'platform characteristics', 'geolocation', 'geodetics', 'coordinate reference system', 'aerosols', 'topographical relief maps', 'terrain elevation', 'normalized difference vegetation index (ndvi)', 'infrared flux', 'visible flux', 'albedo', 'land use/land cover', 'topography', 'lidar', 'lidar waveform', 'plant phenology', 'vegetation cover', 'crop/plant yields', 'land use classes', 'landscape 
patterns', 'forest harvesting and engineering', 'forest management', 'total surface water', 'agricultural plant science', 'photosynthesis', 'primary production', 'leaf characteristics', 'evapotranspiration', 'fire occurrence', 'surface thermal properties', 'canopy characteristics', 'evergreen vegetation', 'crown', 'deciduous vegetation', 'anisotropy', 'fire ecology', 'biomass burning', 'wildfires', 'topographical relief', 'burned area', 'surface radiative properties', 'environmental sustainability', 'boundaries', 'anthropogenic/human influenced ecosystems', 'emissions', 'sulfur dioxide', 'population', 'infrastructure', 'environmental assessments', 'public health', 'conservation', 'agriculture production', 'administrative divisions', 'economic resources', 'socioeconomics', 'lake/pond', 'rivers/stream', 'political divisions', 'environmental vulnerability index (evi)', 'ecosystems', 'urban areas', 'sustainability', 'treaty agreements/results', 'human settlements', 'population estimates', 'nitrogen dioxide', 'cropland', 'pasture', 'particulates', 'cyclones', 'mortality', 'environmental impacts', 'droughts', 'earthquakes', 'population distribution', 'fertilizers', 'animal manure and waste', 'urbanization/urban sprawl', 'landslides', 'avalanche', 'urban lands', 'mangroves', 'volcanic eruptions', 'pesticides', 'population size', 'population density', 'lakes/reservoirs', 'surface water', 'rural areas', 'infant mortality rates', 'amphibians', 'mammals', 'carbon', 'sulfur oxides', 'methane', 'non-methane hydrocarbons/volatile organic compounds', 'nitrogen oxides', 'natural gas', 'coal', 'coastal elevation', 'biodiversity functions', 'nuclear radiation exposure', 'radiation exposure', 'poverty levels', 'malnutrition', 'wetlands', 'sea level rise', 'vulnerability levels/index', 'ground water', 'snow/ice', 'electricity', 'energy production/use', 'sustainable development', 'deforestation', 'household income', 'discharge/flow', 'hydropattern', 'nitrogen', 'phosphorus', 'carbon dioxide', 'alpine/tundra', 'forests', 'vegetation', 'permafrost', 'nutrients', 'plant characteristics', 'leaf area index (lai)', 'soil gas/air', 'ammonia', 'nitrous oxide', 'ecosystem functions', 'litter characteristics', 'soil chemistry', 'soil respiration', 'active layer', 'soil depth', 'cation exchange capacity', 'organic matter', 'soil porosity', 'soil texture', 'permafrost melt', 'land subsidence', 'freeze/thaw', 'surface water features', 'chlorinated hydrocarbons', 'methyl bromide', 'methyl chloride', 'molecular hydrogen', 'sulfur compounds', 'fire models', 'biomass', 'dominant species', 'vegetation species', 'sulfur', 'tree rings', 'soil classification', 'heat index', 'sea ice concentration', 'ocean heat budget', 'reforestation', 'even-toed ungulates', 'species recruitment', 'population dynamics', 'range changes', 'topographic effects', 'land resources', 'river ice depth/extent', 'snow melt', 'river ice', 'animal commodities', 'animal ecology and behavior', 'phenological changes', 'water depth', 'inundation', 'forest fire science', 'biogeochemical cycles', 'radiative forcing', 'soil heat budget', 'drainage', 'respiration rate', 'river/lake ice breakup', 'river/lake ice freeze', 'reclamation/revegetation/restoration', 'permafrost temperature', 'indigenous/native species', 'fire dynamics', 'lichens', 'plants', 'plant succession', 'carbon flux', 'coastal', 'salt marsh', 'degradation', 'altitude', 'carbon and hydrocarbon compounds', 'halocarbons and halogens', 'forest composition/vegetation structure', 'water vapor indicators', 
'barometric altitude', 'atmospheric water vapor', 'terrestrial ecosystems', 'volatile organic compounds', 'boundary layer winds', 'forest fire danger index', 'periglacial processes', 'landscape processes', 'evaporation', 'soil horizons/profile', 'shrubland/scrub', 'soil ph', 'soils', 'soil water holding capacity', 'community structure', 'pingo', 'soil color', 'virtual temperature', 'formaldehyde', 'hydroxyl', 'photolysis rates', 'cloud dynamics', 'nitric oxide', 'molecular oxygen', 'smog', 'peroxyacyl nitrate', 'hydrogen compounds', 'nitrogen compounds', 'oxygen compounds', 'stable isotopes', 'chemical composition', 'actinic flux', 'tropospheric ozone', 'fossil fuel burning', 'industrial emissions', 'denitrification rate', 'sunshine', 'runoff', 'soil structure', 'mosses/hornworts/liverworts', 'peatlands', 'hydraulic conductivity', 'snow/ice temperature', 'vegetation water content', 'discharge', 'chlorophyll concentrations', 'outgoing longwave radiation', 'geomorphic landforms/processes', 'soil compaction', 'soil impedance', 'canopy transmittance', 'water table', 'decomposition', 'water temperature', 'dissolved gases', 'total dissolved solids', 'agricultural expansion', 'forest science', 'pressure tendency', 'visibility', 'biomass dynamics', 'agricultural lands', 'grasslands', 'savannas', 'grazing dynamics/plant herbivory', 'herbivory', 'paleoclimate reconstructions', 'drought indices', 'fire weather index', 'animal yields', 'multivariate enso index', 'dissolved solids', 'ocean currents', 'salinity', 'coastal processes', 'atmospheric pressure', 'afforestation/reforestation', 'fresh water river discharge', 'surface water chemistry', 'drainage basins', 'resource development site', 'dunes', 'flood plain', 'endangered species', 'precipitation indices', 'temperature indices', 'forest yields', 'stratigraphic sequence', 'freeze/frost', 'frost', 'hydrogen cyanide', 'land management', 'nutrient cycling', 'industrialization', 'suspended solids', 'deserts', 'weathering', 'gas flaring', 'atmospheric temperature', 'ice extent', 'fraction of absorbed photosynthetically active radiation (fapar)', 'marshes', 'swamps', 'lake ice', 'atmospheric winds', 'watershed characteristics', 'transportation', 'soil rooting depth', 'isotopes', 'cultural features', 'consumer behavior', 'boundary surveys', 'aquifers', 'land productivity', 'water quality/water chemistry', 'sediment composition', 'dissolved oxygen', 'surface water processes/measurements', 'turbidity', 'conductivity', 'ph', 'calcium', 'magnesium', 'potassium', 'micronutrients/trace elements', 'social behavior', 'sulfate', 'sediment chemistry', 'biogeochemical processes', 'water ion concentrations', 'cropping systems', 'percolation', 'groundwater chemistry', 'reforestation/revegetation', 'species/population interactions', 'soil infiltration', 'alkalinity', 'soil fertility', 'phosphorous compounds', 'radioisotopes', 'cooling degree days', 'angiosperms (flowering plants)', 'glacial landforms', 'glacial processes', 'contour maps', 'estuaries', 'methane production/use', 'natural gas production/use', 'petroleum production/use', 'visualization/image processing', 'subsetting/supersetting', 'transformation/conversion', 'forest mensuration', 'acid deposition', 'differential pressure', 'precipitation', 'marine ecosystems', 'consumption rates', 'radio wave', 'soil organic carbon (soc)', 'soil erosion', 'halocarbons', 'trace elements/trace metals', 'biomass energy production/use', 'riparian wetlands', 'soil consistence', 'snow stratigraphy', 'thermal conductivity', 
'estuary', 'tidal height', 'plant diseases/disorders/pests', 'layered precipitable water', 'atmospheric chemistry', 'water vapor concentration profiles', 'specific humidity', 'total runoff', 'pressure thickness', 'wind stress', 'atmospheric heating', 'conduction', 'hydrogen chloride', 'nitric acid', 'radar', 'land surface/agriculture indicators', 'satellite soil moisture index', 'chlorine nitrate', 'chlorofluorocarbons', 'dinitrogen pentoxide', 'antenna temperature', 'glaciers', 'ice sheets', 'dimethyl sulfide', 'potential vorticity', 'ice fraction', 'atmospheric radiation', 'runoff rate', 'temperature tendency', 'wind dynamics', 'wind direction tendency', 'base flow', 'bromine monoxide', 'chlorine monoxide', 'methyl cyanide', 'hypochlorous acid', 'methanol', 'hydroperoxy', 'cloud base pressure', 'temperature anomalies', 'nitrate', 'ocean mixed layer', 'precipitation trends', 'temperature trends', 'convection', 'ground ice', 'oxygen', 'phosphate', 'solar induced fluorescence', 'chlorine dioxide', 'sun-earth interactions', 'uv aerosol index', 'volcanic activity', 'potential evapotranspiration', 'ultraviolet wavelengths', 'ice temperature', 'sea surface skin temperature', 'sea surface height', 'sublimation', 'convective surface precipitation rate', 'hydrogen fluoride', 'airglow', 'energy deposition', 'x-ray flux', 'electron flux', 'proton flux', 'magnetic fields/magnetic currents']\n else:\n return ['platform characteristics', 'atmospheric winds','radio wave', 'weather events', 'geomagnetism','atmospheric electricity', 'microwave', 'atmospheric temperature','atmospheric water vapor', 'atmospheric pressure', 'aerosols','atmospheric radiation', 'atmospheric chemistry', 'precipitation','sensor characteristics', 'radar', 'infrared wavelengths','visible wavelengths', 'weather/climate advisories', 'clouds','lidar', 'ocean optics', 'ultraviolet wavelengths','cryospheric indicators', 'land use/land cover', 'topography','surface thermal properties', 'spectral/engineering', 'soils','snow/ice', 'geothermal dynamics', 'natural hazards','surface water', 'vegetation','land surface/agriculture indicators','gravity/gravitational field', 'marine advisories', 'altitude','water quality/water chemistry', 'ocean temperature','ocean winds', 'atmospheric/ocean indicators', 'coastal processes','erosion/sedimentation', 'marine sediments', 'ocean chemistry','salinity/density', 'ocean color', 'aquatic ecosystems','vegetation2', 'landscape', 'cloud properties','surface radiative properties', 'geodetics','agricultural plant science', 'forest science','ecological dynamics', 'environmental impacts', 'sustainability','boundaries', 'ecosystems', 'air quality', 'population','infrastructure', 'environmental governance/management','public health', 'economic resources', 'socioeconomics','environmental vulnerability index (evi)', 'human settlements','agricultural chemicals', 'animal science','habitat conversion/fragmentation', 'animals/vertebrates','earth gases/liquids', 'rocks/minerals/crystals','social behavior', 'ground water', 'frozen ground','terrestrial hydrosphere indicators', 'ocean heat budget','biospheric indicators', 'animal commodities', 'fungi', 'plants','carbon flux', 'geomorphic landforms/processes','paleoclimate indicators', 'ocean circulation', 'sea ice','geochemistry', 'visualization/image processing','subsetting/supersetting', 'transformation/conversion','ocean pressure', 'glaciers/ice sheets', 'protists','solar activity', 'sun-earth interactions','sea surface topography', 'solar energetic particle 
properties','solar energetic particle flux','ionosphere/magnetosphere dynamics','flight data logs','wind dynamics', 'radio wave flux', 'lightning', 'magnetic field','atmospheric conductivity', 'electric field','data synchronization time', 'brightness temperature','upper air temperature', 'water vapor profiles','surface temperature', 'upper level winds','atmospheric pressure measurements', 'water vapor indicators','aerosol particle properties', 'emissivity','trace gases/trace species', 'liquid precipitation','cloud microphysics', 'microwave radiance', 'sensor counts','total pressure', 'airspeed/ground speed', 'total temperature','static pressure', 'humidity', 'radar reflectivity','doppler velocity', 'infrared imagery', 'visible imagery','aerosol backscatter', 'weather forecast', 'tropical cyclones','visible radiance', 'infrared radiance','atmospheric temperature indices', 'cloud droplet distribution','cloud condensation nuclei', 'hydrometeors', 'oxygen compounds','wind profiles', 'liquid water equivalent', 'solar radiation','planetary boundary layer height', 'surface winds','precipitation amount', 'precipitation rate', 'surface pressure','aerosol extinction', 'aerosol optical depth/thickness','tropospheric/high-level clouds (observed/analyzed)','lidar depolarization ratio', 'radar backscatter','radar cross-section', 'return power', 'radial velocity','radiance', 'climate advisories', 'atmospheric emitted radiation','optical depth/thickness', 'ultraviolet flux', 'spectrum width','microwave imagery', 'lidar backscatter', 'radar imagery','snow depth', 'land use/land cover classification','terrain elevation', 'solid precipitation', 'droplet size','droplet concentration/size', 'precipitation anomalies','snow water equivalent', 'total surface precipitation rate','skin temperature', 'water vapor', 'attitude characteristics','land surface temperature', 'reflectance','soil moisture/water content', 'soil temperature','soil bulk density', 'surface roughness', 'present weather','snow density', 'geothermal temperature','aerosol forward scatter', 'floods', 'snow cover', 'sigma naught','precipitable water', 'surface water processes/measurements','surface water features', 'shortwave radiation','photosynthetically active radiation', 'longwave radiation','net radiation', 'flight level winds', 'soil moisture','satellite orbits/revolution', 'heat flux','precipitation profiles', 'geopotential height','particulate matter', 'particle images', 'water vapor indices','electrical conductivity', 'gases', 'sea surface temperature','convective clouds/systems (observed/analyzed)','viewing geometry', 'wind shear','carbon and hydrocarbon compounds', 'sea level pressure','water vapor processes', 'ultraviolet radiation','solar irradiance', 'scattering', 'absorption','sea surface temperature indices', 'sedimentation', 'erosion','sediment transport', 'sediments', 'tropopause', 'nan', 'pigments','attenuation/transmission', 'inorganic carbon', 'organic carbon','photosynthetically available radiation', 'chlorophyll','optical depth', 'fluorescence', 'vegetation index', 'gelbstoff','plankton', 'vegetation index2', 'landscape ecology','ultraviolet radiance', 'aerosol radiance','carbonaceous aerosols', 'dust/ash/smoke', 'nitrate particles','organic particles', 'sulfate particles', 'radiative flux','transmittance', 'atmospheric stability','cloud radiative transfer', 'rain storms', 'reflected infrared','thermal infrared', 'incoming solar radiation', 'cloud types','orbital characteristics', 'geolocation','coordinate reference system', 
'infrared flux', 'visible flux','albedo', 'lidar waveform', 'plant phenology', 'vegetation cover','crop/plant yields', 'land use classes', 'landscape patterns','forest harvesting and engineering', 'forest management','ecosystem functions', 'leaf characteristics', 'fire ecology','total surface water', 'primary production', 'photosynthesis','canopy characteristics', 'evergreen vegetation', 'crown','deciduous vegetation', 'anisotropy', 'biomass burning','wildfires', 'topographical relief','environmental sustainability','anthropogenic/human influenced ecosystems', 'emissions','sulfur compounds', 'environmental assessments', 'conservation','agriculture production', 'administrative divisions','freshwater ecosystems', 'political divisions', 'urban areas','treaty agreements/results', 'population estimates','nitrogen compounds', 'particulates', 'mortality', 'droughts','earthquakes', 'population distribution', 'fertilizers','animal manure and waste', 'urbanization/urban sprawl','landslides', 'avalanche', 'mangroves', 'volcanic eruptions','pesticides', 'population size', 'population density','rural areas', 'amphibians', 'mammals', 'carbon', 'sulfur oxides','land management', 'natural gas', 'sedimentary rocks','coastal elevation', 'community dynamics','nuclear radiation exposure', 'radiation exposure','poverty levels', 'malnutrition', 'sea level rise','vulnerability levels/index', 'electricity','energy production/use', 'sustainable development','deforestation', 'household income', 'nitrogen', 'phosphorus','terrestrial ecosystems', 'permafrost', 'nutrients','plant characteristics', 'soil gas/air', 'litter characteristics','soil chemistry', 'soil respiration', 'active layer', 'soil depth','cation exchange capacity', 'organic matter', 'soil porosity','soil texture', 'permafrost melt','ground water processes/measurements', 'freeze/thaw','halocarbons and halogens', 'hydrogen compounds', 'biomass','dominant species', 'vegetation species', 'sulfur', 'tree rings','soil classification', 'sea ice concentration', 'reforestation','species/population interactions', 'range changes','topographic effects', 'land resources', 'river ice depth/extent','snow melt', 'river ice', 'animal ecology and behavior','phenological changes', 'forest fire science', 'radiative forcing','soil heat budget', 'river/lake ice breakup','river/lake ice freeze', 'reclamation/revegetation/restoration','lichens', 'marine ecosystems', 'coastal landforms', 'degradation','forest composition/vegetation structure', 'barometric altitude','volatile organic compounds', 'forest fire danger index','periglacial processes', 'landscape processes','soil horizons/profile', 'soil ph', 'soil water holding capacity','fluvial landforms', 'soil color', 'glacial processes','photochemistry', 'cloud dynamics', 'nitrogen oxides', 'smog','chemical composition', 'actinic flux', 'tropospheric ozone','fossil fuel burning', 'industrial emissions','denitrification rate', 'sunshine', 'soil structure','mosses/hornworts/liverworts', 'hydraulic conductivity','snow/ice temperature', 'water characteristics','outgoing longwave radiation', 'soil compaction', 'soil impedance','canopy transmittance', 'ground water features', 'solids','agricultural expansion', 'pressure tendency', 'visibility','herbivory', 'paleoclimate reconstructions', 'drought indices','fire weather index', 'animal yields', 'teleconnections','carbon dioxide', 'dissolved solids', 'ocean currents', 'salinity','afforestation/reforestation', 'fresh water river discharge','surface water chemistry', 'aeolian 
landforms','precipitation indices', 'temperature indices', 'forest yields','stratigraphic sequence', 'freeze/frost', 'frost','industrialization', 'ice core records', 'suspended solids','weathering', 'gas flaring', 'ice extent', 'biogeochemical cycles','lake ice', 'isotopes', 'watershed characteristics','transportation', 'soil rooting depth', 'geochemical properties','carbon monoxide', 'cultural features', 'consumer behavior','boundary surveys', 'land productivity', 'sediment composition','calcium', 'magnesium', 'potassium','micronutrients/trace elements', 'sediment chemistry','biogeochemical processes', 'cropping systems','groundwater chemistry', 'reforestation/revegetation','soil infiltration', 'soil fertility','angiosperms (flowering plants)', 'glacial landforms','forest mensuration', 'acid deposition', 'differential pressure','soil erosion', 'trace elements/trace metals', 'soil consistence','snow stratigraphy', 'thermal conductivity', 'estuaries','tidal height', 'plant diseases/disorders/pests','pressure thickness', 'atmospheric heating', 'conduction','evaporation', 'turbulence', 'wind stress','satellite soil moisture index', 'antenna temperature', 'glaciers','ice sheets', 'nitrate', 'ocean mixed layer','precipitation indicators', 'temperature indicators', 'ground ice','alkalinity', 'dissolved gases', 'oxygen', 'ph', 'phosphate','solar induced fluorescence', 'volcanic activity','ice temperature', 'sea surface height', 'airglow','energy deposition', 'x-ray flux', 'electron flux', 'proton flux','magnetic fields/magnetic currents', 'vertical profiles','air temperature', 'dew point temperature','cloud liquid water/ice', 'wind speed', 'wind direction','vertical wind velocity/speed', 'total precipitable water','boundary layer temperature', 'cloud height','cloud droplet concentration/size', 'ozone','cloud base temperature', 'cloud base height', 'rain','cloud optical depth/thickness', 'cirrus/systems','mean radial velocity', 'relative humidity', 'u/v wind components','wind speed/wind direction','digital elevation/terrain model (dem)', 'snow', 'drizzle','particle size distribution', 'hail', 'ambient temperature','stage height', 'rivers/streams', 'hourly precipitation amount','24 hour precipitation amount', 'latent heat flux','cloud fraction', '3 and 6 hour precipitation amount','horizontal wind velocity/speed', 'dissolved carbon dioxide','hurricanes', 'tropical cyclone track', 'cloud top height','temperature profiles', 'vertical wind shear','water vapor tendency', 'potential temperature','angstrom exponent', 'water vapor mixing ratio profiles','extreme eastern tropical pacific sst', 'phytoplankton','cloud precipitable water', 'cloud asymmetry', 'cloud ceiling','cloud frequency', 'cloud top pressure', 'cloud top temperature','cloud vertical distribution', 'cloud emissivity','cloud radiative forcing', 'cloud reflectance','maximum/minimum temperature', 'condensation','topographical relief maps', 'evapotranspiration','fire occurrence', 'burned area', 'sulfur dioxide', 'lake/pond','rivers/stream', 'nitrogen dioxide', 'agricultural lands','cyclones', 'urban lands', 'lakes/reservoirs','infant mortality rates', 'methane','non-methane hydrocarbons/volatile organic compounds', 'coal','biodiversity functions', 'wetlands', 'discharge/flow','hydropattern', 'alpine/tundra', 'forests','leaf area index (lai)', 'ammonia', 'nitrous oxide','land subsidence', 'normalized difference vegetation index (ndvi)','chlorinated hydrocarbons', 'methyl bromide', 'methyl chloride','molecular hydrogen', 'fire models', 'heat 
index','even-toed ungulates', 'species recruitment','population dynamics', 'water depth', 'inundation', 'drainage','respiration rate', 'permafrost temperature','indigenous/native species', 'fire dynamics', 'plant succession','coastal', 'salt marsh', 'boundary layer winds', 'shrubland/scrub','community structure', 'pingo', 'virtual temperature','formaldehyde', 'hydroxyl', 'photolysis rates', 'nitric oxide','molecular oxygen', 'peroxyacyl nitrate', 'stable isotopes','runoff', 'vegetation water content', 'discharge','chlorophyll concentrations', 'water table', 'decomposition','water temperature', 'total dissolved solids', 'biomass dynamics','grasslands', 'savannas', 'grazing dynamics/plant herbivory','multivariate enso index', 'drainage basins','resource development site', 'dunes', 'flood plain','endangered species', 'hydrogen cyanide', 'nutrient cycling','deserts','fraction of absorbed photosynthetically active radiation (fapar)','aquifers', 'dissolved oxygen', 'turbidity', 'conductivity','sulfate', 'water ion concentrations', 'percolation','phosphorous compounds', 'radioisotopes', 'cooling degree days','contour maps', 'methane production/use','natural gas production/use', 'petroleum production/use','consumption rates', 'soil organic carbon (soc)', 'halocarbons','biomass energy production/use', 'estuary','layered precipitable water', 'water vapor concentration profiles','hydrogen chloride', 'nitric acid', 'chlorine nitrate','chlorofluorocarbons', 'dinitrogen pentoxide', 'dimethyl sulfide','vorticity', 'ice fraction', 'temperature tendency','wind direction tendency', 'bromine monoxide', 'chlorine monoxide','methyl cyanide', 'hypochlorous acid', 'methanol', 'hydroperoxy','cloud base pressure', 'temperature anomalies','precipitation trends', 'temperature trends', 'convection','chlorine dioxide', 'uv aerosol index','sea surface skin temperature', 'sublimation','convective surface precipitation rate', 'hydrogen fluoride']", "def __init__(self, *args, **kwargs):\n super(TaggedContentItemForm, self).__init__(*args, **kwargs)\n wtf = Tag.objects.filter(group__system=False)\n wlist = [w for t, w in self.fields.items() if t.endswith(\"tags\")]\n choices = []\n for choice in wtf:\n choices.append((choice.id, str(choice)))\n [setattr(w, 'choices', choices) for w in wlist]", "def cvpr2018_labels():\n\n return {\n 0: 'others',\n 33: 'car',\n 34: 'motorcycle',\n 35: 'bicycle',\n 36: 'pedestrian',\n 38: 'truck',\n 39: 'bus',\n 40: 'tricycle'\n }", "def test_get_item_facets(integrated_ff):\n key, ff_env = integrated_ff['ff_key'], integrated_ff['ff_env']\n item_type = 'experiment_set_replicate'\n facets = ff_utils.get_item_facets(item_type, key=key, ff_env=ff_env)\n assert 'Lab' in facets\n assert 'Center' in facets\n assert 'Set Type' in facets\n assert 'Internal Release Date' in facets", "def decision_value_names(self, run_idx):\n enum_grp = self.decision_grp(run_idx)\n rev_enum = {}\n for decision_name, dset in enum_grp.items():\n value = dset[()]\n rev_enum[value] = decision_name\n\n return rev_enum", "def test_grouping_tag(self):\r\n input_tags = ('radio', 'checkbox')\r\n self.context['status'] = Status('correct')\r\n xpath = \"//section[@id='forinput1_choiceinput_0bc']\"\r\n\r\n self.context['value'] = {}\r\n for input_type in input_tags:\r\n self.context['input_type'] = input_type\r\n xml = self.render_to_xml(self.context)\r\n self.assert_has_xpath(xml, xpath, self.context)", "def enabled_facets(self, group_name):\n setting = self.enabled_facets_setting(group_name)\n try:\n value = setting.json_value\n 
except ValueError as e:\n logging.error(\"Invalid list of enabled facets for %s: %s\",\n group_name, setting.value)\n if value is None:\n value = list(\n FacetConstants.DEFAULT_ENABLED_FACETS.get(group_name, [])\n )\n return value", "def _timeseries_scatter_plot_lbls(self, results_dict, keys, axes, meta):\n if meta[\"var_combination\"].partition(\":\")[-1] == \"tas\":\n against_region = \"Global\"\n else:\n against_region = (\n f\"{self.cfg['region'][2]}$^o$ N-{self.cfg['region'][3]}\"\n f\"$^o$ N latitudinal belt\")\n large_scale_units = self.formatter(\n str(\n iris.load_cube(\n results_dict['large_scale'][keys[0][-1]]).units))\n regional_units = self.formatter(\n str(iris.load_cube(results_dict['regional'][keys[1][-1]]).units))\n xlabel = (f\"{against_region} \"\n f\"{meta['var_combination'].partition(':')[-1].upper()} \"\n f\"[{large_scale_units}]\")\n axes.set_xlabel(xlabel)\n ylabel = (f\"{self.cfg['region_name']} \"\n f\"{meta['var_combination'].partition(':')[0].upper()} \"\n f\"[{regional_units}]\")\n axes.set_ylabel(ylabel)\n\n axes.set_title(f\"Scenario: {meta['title_format']} \\n CMIP5: rval=\"\n f\"{meta['rvalue']['cmip5']:.3f}; \"\n f\"slope={meta['slope']['cmip5']:.3f} \"\n f\"\\n CMIP6: rval={meta['rvalue']['cmip6']:.3f}; \"\n f\"slope={meta['slope']['cmip6']:.3f}\")\n axes.legend(handles=meta[\"legend_elements\"])\n\n long_name_dict = {\"pr\": \"precipitation\", \"tas\": \"temperature\"}\n if meta[\"var_combination\"] == \"pr:tas\":\n suptitle = (f\"{self.cfg['region_name']} {meta['season'].upper()} \"\n f\"precipitation vs global {meta['season'].upper()} \"\n f\"temperature.\\n 10yr rolling means 1960-2100, \"\n f\"Baseline: 1986-2005\")\n plt.suptitle(suptitle)\n else:\n y_combination = meta[\"var_combination\"].partition(':')[0]\n suptitle = (f\"{self.cfg['region_name']} vs {against_region} \"\n f\"{meta['season'].upper()} \"\n f\"{long_name_dict[y_combination]}\"\n f\".\\n 10yr rolling means 1960-2100, \"\n f\"Baseline: 1986-2005\")\n plt.suptitle(suptitle)\n return suptitle", "def test_dict_labels_sorted(self):\n le = {3: \"a\", 2: \"c\", 1: \"b\"}\n oz = ClassificationScoreVisualizer(GaussianNB(), encoder=le)\n npt.assert_array_equal(oz._labels(), [\"b\", \"c\", \"a\"])", "def plot_languages(dict_usage_complexities, dict_cognitive_complexity):\n attested_languages = (\n frozenset(['nor', 'and', 'or', 'not']),\n frozenset(['and', 'or', 'not']),\n frozenset(['and', 'not']),\n frozenset(['or', 'not']),\n )\n\n fig, ax = plt.subplots(figsize=(8.27,4))\n for name in dict_usage_complexities.keys():\n\n # if not any([i in ['nc', 'nic', 'bc', 'XOR', 'c', 'ic'] for i in name]) and 'not' in name:\n if 'not' in name:\n # if True:\n\n usage_complexity = dict_usage_complexities[name]\n cognitive_complexity = dict_cognitive_complexity[name]\n\n if name in attested_languages:\n color = 'red'\n zorder = 10\n if name == frozenset(['or', 'not']):\n yshift = 0.4\n else:\n yshift = 0\n ax.text(\n usage_complexity + 0.02,\n cognitive_complexity + 0.3 + yshift,\n s=','.join(name),\n fontsize='x-small'\n )\n else:\n color='black'\n zorder = 1\n\n# ax.scatter(\n # usage_complexity, cognitive_complexity,\n# color=color,\n# zorder=zorder\n# )\n # ax.text(\n # usage_complexity, cognitive_complexity,\n # s=','.join(name),\n # fontsize='xx-small',\n # rotation=90,\n # color=color\n # )\n ax.scatter(usage_complexity,cognitive_complexity,color=color)\n\n ax.set_xlabel('Usage complexity')\n ax.set_ylabel('Conceptual complexity')\n # ax.set_xlim(0,3)\n ax.set_xlim(1.05,2.8)\n\n # plt.show()\n 
plt.savefig('figure.png', dpi=300, transparent=True)", "def setLabels(self):\r\n # productive\r\n profprint()\r\n self.option = {0:'Ba',\r\n 1:'Bb',\r\n 2:'Bc',\r\n 3:'Bd',\r\n 4:'Be',\r\n 5:'Bf',\r\n 6:'Bg',\r\n 7:'Bh',\r\n 8:'Bi',\r\n 9:'Bj',\r\n 10:'Bk',\r\n 11:'Bl',\r\n 12:'Ca',\r\n 13:'Cb',\r\n 14:'Cc',\r\n 15:'Cd',\r\n 16:'Ce',\r\n 17:'Cf',\r\n 18:'Cg',\r\n 19:'Ch',\r\n 20:'Ci',\r\n 21:'Cj',\r\n 22:'Ck',\r\n 23:'Cl',\r\n 24:'Cm',\r\n 25:'Cn',\r\n 26:'Co',\r\n 27:'Cp',\r\n 28:'Cq',\r\n 29:'Cr',\r\n 30:'Da',\r\n 31:'Db',\r\n 32:'Dc',\r\n 33:'Dd',\r\n 34:'De',\r\n 35:'Df',\r\n 36:'Dg',\r\n 37:'Dh',\r\n 38:'Di',\r\n 39:'Dj',\r\n 40:'Ea',\r\n 41:'Eb',\r\n 42:'Ec',\r\n 43:'Ed',\r\n 44:'Ee',\r\n 45:'Ef',\r\n 46:'Eg',\r\n 47:'Eh',\r\n 48:'Aa',\r\n 49:'Ab',\r\n 50:'Ac',\r\n 51:'Ad',\r\n 52:'Ae',\r\n 53:'Af',\r\n 54:'Iu',\r\n 55:'Fa',\r\n 56:'Fb',\r\n 57:'Fc',\r\n 58:'Fd',\r\n 59:'Fe',\r\n 60:'Ff',\r\n 61:'Fg',\r\n 62:'Fh',\r\n 63:'--'}\r\n\r\n return self.option", "def setLabels(self):\n #productive\n profprint()\n self.option = {0:'Ba',\n 1:'Bb',\n 2:'Bc',\n 3:'Bd',\n 4:'Be',\n 5:'Bf',\n 6:'Bg',\n 7:'Bh',\n 8:'Bi',\n 9:'Bj',\n 10:'Bk',\n 11:'Bl',\n 12:'Ca',\n 13:'Cb',\n 14:'Cc',\n 15:'Cd',\n 16:'Ce',\n 17:'Cf',\n 18:'Cg',\n 19:'Ch',\n 20:'Ci',\n 21:'Cj',\n 22:'Ck',\n 23:'Cl',\n 24:'Cm',\n 25:'Cn',\n 26:'Co',\n 27:'Cp',\n 28:'Cq',\n 29:'Cr',\n 30:'Da',\n 31:'Db',\n 32:'Dc',\n 33:'Dd',\n 34:'De',\n 35:'Df',\n 36:'Dg',\n 37:'Dh',\n 38:'Di',\n 39:'Dj',\n 40:'Ea',\n 41:'Eb',\n 42:'Ec',\n 43:'Ed',\n 44:'Ee',\n 45:'Ef',\n 46:'Eg',\n 47:'Eh',\n 48:'Aa',\n 49:'Ab',\n 50:'Ac',\n 51:'Ad',\n 52:'Ae',\n 53:'Af',\n 54:'Iu', \n 55:'Fa',\n 56:'Fb',\n 57:'Fc',\n 58:'Fd',\n 59:'Fe',\n 60:'Ff',\n 61:'Fg',\n 62:'Fh',\n 63:'--'}\n\n return self.option", "def facet_terms(facet):\n facetterms = []\n results = elasticsearch.facet_terms(settings.ELASTICSEARCH_HOST_PORT,\n settings.DOCUMENT_INDEX, facet['name'], order='term')\n if facet.get('terms', []):\n # precoordinate\n # IMPORTANT: topics and facility term IDs are int. 
All others are str.\n term_counts = {}\n for t in results['terms']:\n term_id = extract_term_id(t['term'])\n term_count = t['count']\n if term_id and term_count:\n term_counts[term_id] = term_count\n # make URLs for terms\n for term in facet['terms']:\n term['url'] = reverse('ui-search-term-query', args=(facet['id'], term['id']))\n # add counts to terms\n for term in facet['terms']:\n term_id = term['id']\n if isinstance(term_id, int):\n term_id = str(term_id)\n term['count'] = term_counts.get(term_id, 0)\n facetterms.append(term)\n else:\n # postcoordinate\n for t in results['terms']:\n t['title'] = t['term']\n t['description'] = ''\n t['url'] = '/search/%s:%s/' % (facet['id'], t['term'])\n facetterms.append(t)\n return facetterms", "def _gather_components(self):\n comps = set()\n for data in self._collection:\n for c in data.components:\n if c in comps:\n continue\n label = \"%s (%s)\" % (c, data.label)\n label = disambiguate(label, self._labels)\n self._labels[label] = c\n comps.add(c)", "def visualizeChosenOptions(self):\n \n # Visualising the results\n plt.hist(self.opt_selected)\n plt.title('Histogram of ads selections')\n plt.xlabel('Ads')\n plt.ylabel('Number of times each ad was selected')\n plt.show()", "def lyft_labels():\n\n return {\n 0: 'None',\n 7: 'Roads',\n 10: 'Vehicles'\n }", "def test_select_label(self):\n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline)\n layout = SectionLayout.objects.get(sectionlayouttranslation__name=\"Side by Side\")\n section1 = create_section(title=\"Test Section 1\", story=story, layout=layout)\n section2 = create_section(title=\"Test Section 2\", story=story, layout=layout)\n form = SectionRelationAdminForm()\n choices_list = list(form.fields['parent'].widget.choices)\n self.assertIn(story.title, choices_list[1][1])\n self.assertIn(story.title, choices_list[2][1])", "def get_labels():\n return {\"contradiction\": 0, \"neutral\": 1, \"entailment\": 2}", "def facets(self, facets):\n\n self._facets = facets", "def set_label(termtype, timeperiod):\n label = 'Graph these comma-separated noun phrases (yearly frequencies):' if termtype == 'Noun phrases' and timeperiod == 'Yearly' \\\n else 'Graph these comma-separated noun phrases (monthly frequencies):' if termtype == 'Noun phrases' and timeperiod == 'Monthly' \\\n else 'Graph these comma-separated entity mentions (yearly frequencies):' if termtype == 'Entity mentions' and timeperiod == 'Yearly' \\\n else 'Graph these comma-separated entity mentions (monthly frequencies):' if termtype == 'entity mentions' and timeperiod == 'Monthly' \\\n else 'Enter a phrase and get similar terms and the distribution of its \"cluster\"'\n return label", "def get_field_info(self, field):\n field_info = super(SimpleMetadata, self).get_field_info(field)\n if self.should_detail_choices(field, field_info):\n field_info['choices'] = [\n {\n 'value': choice_value,\n 'display_name': force_text(choice_name, strings_only=True)\n }\n for choice_value, choice_name in field.choices.items()\n ]\n\n 
return field_info", "def test_render_value_label(self):\n self.check_html(\n self.widget(choices=self.beatles),\n \"beatles\",\n [\"John\"],\n html=(\n \"\"\"<select multiple name=\"beatles\">\n <option value=\"J\">John</option>\n <option value=\"P\">Paul</option>\n <option value=\"G\">George</option>\n <option value=\"R\">Ringo</option>\n </select>\"\"\"\n ),\n )", "def get_context(self, name, value, attrs=None, choices=()):\n context = super(TriStateCheckboxSelectMultiple, self).get_context(\n name, value, attrs\n )\n\n choices = dict(it.chain(self.choices, choices))\n if value is None:\n value = dict.fromkeys(choices, False)\n else:\n value = dict(dict.fromkeys(choices, False).items() +\n value.items())\n\n context['values'] = [\n (choice, label, value[choice])\n for choice, label in choices.iteritems()\n ]\n\n return context", "def facets(self):\n return self._facets", "def get_val_labels(self):\n raise NotImplementedError", "def pre_validate(self, form):\n for item1,item2 in self.choices:\n if isinstance(item2, (list, tuple)):\n group_label = item1\n group_items = item2\n for val,label in group_items:\n if val == self.data:\n return\n else:\n val = item1\n label = item2\n if val == self.data:\n return\n raise ValueError(self.gettext('Not a valid choice!'))", "def qubit_values(self):\n return self.label", "def test_get_search_facet_values(integrated_ff):\n key, ff_env = integrated_ff['ff_key'], integrated_ff['ff_env']\n query = 'search/?lab.display_title=4DN+DCIC%2C+HMS&type=Biosource'\n facets = ff_utils.get_search_facet_values(query, key=key, ff_env=ff_env)\n print(facets)\n assert 'Project' in facets\n assert '4DN' in facets['Project']\n assert 'Tissue' in facets\n assert 'brain' in facets['Tissue']\n assert 'Status' in facets", "def field_choices_used_to_translated_value():\r\n LANGUAGES = (\r\n ('en', 'English'),\r\n ('ru', 'Russian'),\r\n )\r\n\r\n from django.db import models\r\n\r\n class Article(models.Model):\r\n name = models.CharField(max_length=200)\r\n language = models.CharField(max_length=200, choices=LANGUAGES)\r\n\r\n def __unicode__(self):\r\n return self.name\r\n\r\n class ArticleTable(tables.Table):\r\n class Meta:\r\n model = Article\r\n\r\n table = ArticleTable([Article(name='English article', language='en'),\r\n Article(name='Russian article', language='ru')])\r\n\r\n assert 'English' == table.rows[0]['language']\r\n assert 'Russian' == table.rows[1]['language']", "def test_tlabel(self):\n fig = plt.figure()\n ax = fig.add_subplot(projection='ternary')\n label = \"T\"\n ax.set_tlabel(label)\n assert ax.get_tlabel() == label", "def get_choice(choice):\r\n return {\r\n '0': ('Extraversion', 0.07),\r\n '1': ('Neuroticism', 0.27),\r\n '2': ('Agreeableness', 0.11),\r\n '3': ('Conscientiousness', 0.09),\r\n '4': ('Openness', 0.45)\r\n }.get(choice, (None, None))", "def labels(self):\n\n param=self\n\n l=len(param)\n\n sweep_label=[]\n\n for index,name in enumerate(param.names):\n\n sweep_label.append((\\\n ''.join([c for c in name if c.isupper()]))\\\n .replace(\"IDT\",\"\")\\\n .replace(\"S\",\"\")\\\n .replace(\"M\",\"\"))\n\n stringout=[]\n\n unique={name:list(dict.fromkeys(values)) for name,values in zip(param.names,param.values)}\n\n for i in range(l):\n\n tmp_lab=''\n\n for lab,name in zip(sweep_label,self.names):\n\n tmp_lab=tmp_lab+lab+str(unique[name].index(param()[name][i]))\n\n stringout.append(tmp_lab)\n\n return stringout", "def evaluate_value_choice_with_dict(value_choice: ChoiceOf[T], chosen: dict[str, Choice]) -> T:\n choice_inner_values = []\n for 
choice in value_choice.inner_choices():\n if choice.label not in chosen:\n raise KeyError(f'{value_choice} depends on a value with key {choice.label}, but not found in {chosen}')\n choice_inner_values.append(chosen[choice.label])\n return value_choice.evaluate(choice_inner_values)", "def get_labels(self):\n return [\"contradiction\", \"entailment\", \"neutral\"]", "def label_data(data):\n if data == 'cat': return [1, 0]\n elif data == 'dog': return [0, 1]", "def _getPrettyNamesTransformer(self, working_stats, params):\n\n filtered_stat = {}\n logic = params['choices_logic']\n for choice, result in working_stats.iteritems():\n entity = logic.getFromKeyName(choice)\n filtered_stat[entity.name] = result\n\n return filtered_stat", "def __init__(self, *args, **kwargs):\n super(ChoiceFieldType, self).__init__(*args, **kwargs)\n\n self.choices = self.get_field_info_key('choices')", "def test_get_item_facet_values(integrated_ff):\n key, ff_env = integrated_ff['ff_key'], integrated_ff['ff_env']\n item_type = 'ExperimentSetReplicate'\n facets = ff_utils.get_item_facet_values(item_type, key=key, ff_env=ff_env)\n assert 'Project' in facets\n assert '4DN' in facets['Project']\n assert 'Assay Details' in facets\n assert 'Target: YFG protein' in facets['Assay Details']\n assert 'Status' in facets", "def __str__(self):\n return gettext('One of %s') % self._get_choices_str()", "def test_prior_name(self):\n dim = Dimension(\"yolo\", \"reciprocal\", 1e-10, 1)\n assert dim.prior_name == \"reciprocal\"\n\n dim = Dimension(\"yolo\", \"norm\", 0.9)\n assert dim.prior_name == \"norm\"\n\n dim = Real(\"yolo\", \"uniform\", 1, 2)\n assert dim.prior_name == \"uniform\"\n\n dim = Integer(\"yolo1\", \"uniform\", -3, 6)\n assert dim.prior_name == \"int_uniform\"\n\n dim = Integer(\"yolo1\", \"norm\", -3, 6)\n assert dim.prior_name == \"int_norm\"\n\n categories = {\"asdfa\": 0.1, 2: 0.2, 3: 0.3, \"lalala\": 0.4}\n dim = Categorical(\"yolo\", categories)\n assert dim.prior_name == \"choices\"", "def get_label(settings):", "def test_product_labels(self):\n\n prd = Product.objects.get(id=1)\n # label name\n label_name = prd._meta.get_field('name').verbose_name\n self.assertEqual(label_name, 'name')\n # label description\n label_name = prd._meta.get_field('description').verbose_name\n self.assertEqual(label_name, 'description')\n # label nutrition_grade\n label_name = prd._meta.get_field('nutrition_grade').name\n self.assertEqual(label_name, 'nutrition_grade')\n # label barcode\n label_name = prd._meta.get_field('barcode').verbose_name\n self.assertEqual(label_name, 'barcode')\n # label url\n label_name = prd._meta.get_field('url').verbose_name\n self.assertEqual(label_name, 'url')\n # label url_pic\n label_name = prd._meta.get_field('url_pic').name\n self.assertEqual(label_name, 'url_pic')\n # label store\n label_name = prd._meta.get_field('store').verbose_name\n self.assertEqual(label_name, 'store')\n # label prd_cat\n label_name = prd._meta.get_field('prd_cat').name\n self.assertEqual(label_name, 'prd_cat')\n # label fat\n label_name = prd._meta.get_field('fat').verbose_name\n self.assertEqual(label_name, 'fat')\n # label saturated_fat\n label_name = prd._meta.get_field('saturated_fat').name\n self.assertEqual(label_name, 'saturated_fat')\n # label sugar\n label_name = prd._meta.get_field('sugar').verbose_name\n self.assertEqual(label_name, 'sugar')\n # label salt\n label_name = prd._meta.get_field('salt').verbose_name\n self.assertEqual(label_name, 'salt')", "def compatible_format(self, tuner_method, label):\n\n if 
tuner_method == 'grid_search':\n return self.values\n\n elif tuner_method in ['random_search', 'tpe', 'annealing']:\n return hp.choice(label, self.values)\n\n elif tuner_method == 'gaussian_process':\n return tuple(self.values)", "def hotspot_fields_plot(self, results_dict, tas_bound=None, pr_bound=None):\n sorted_keys = [(f\"{period}_{season}_{variable}\"\n f\"_{project}_{results_dict['scenario']}\")\n for variable in self.variables\n for period in self.cfg[\"future_periods\"]\n for project in self.projects for season in self.seasons]\n sorted_keys = [\n sorted_keys[:len(sorted_keys) // 2],\n sorted_keys[len(sorted_keys) // 2:]\n ]\n ancestor_files_var = [[\n ancestor_file for ancestor_file in results_dict[\"ancestors\"]\n if f\"/{var}_\" in ancestor_file\n ] for var in self.variables]\n for ancestor_files, keys, variable in zip(ancestor_files_var,\n sorted_keys, self.variables):\n fig = plt.figure(figsize=(14.4, 3.4),\n constrained_layout=True,\n dpi=300)\n plt.gcf().subplots_adjust()\n # bound colorbar to abs(max) value on the map\n style = self.cb_bounds(variable, results_dict, keys,\n [tas_bound, pr_bound])\n # plot each panel\n fill, frame = self._hotspot_fields_plot_panels(\n results_dict, fig, keys, style)\n # plot figtexts\n self._hotspot_fields_plot_figtexts(results_dict['scenario'], frame)\n # plot line\n self._hotspot_fields_plot_line(fig, frame)\n # plot colorbar\n cbar = plt.colorbar(fill,\n plt.gcf().add_axes([0.25, 0.125, 0.5, 0.04]),\n orientation=\"horizontal\",\n extend=\"both\")\n if variable == \"pr\":\n cbar.set_label(\"%\")\n against_region = (\n f\"{self.cfg['region'][2]}$^o$ N-\"\n f\"{self.cfg['region'][3]}$^o$ N latitudinal belt\")\n else:\n cbar.set_label(\n self.formatter(str(results_dict[keys[-1]].units)))\n against_region = \"global\"\n\n # plot title and save\n self._hotspot_fields_plot_save(against_region, variable,\n results_dict['scenario'],\n ancestor_files)", "def ground_truth_label_facet(self):\n return self._ground_truth_label_facet", "def get_options(self):\n additional_data = self.get_additional_data()\n options_out = []\n for key, value in additional_data['DIMENSION_VALUES'].items():\n key_label = ' '.join(key.split('_')).strip().title()\n data = {'specification': key_label, 'specification_key': key, 'choices': value}\n options_out.append(data)\n return options_out", "def _fact2label(self, ax_ndx, fact_ndx):\n if len(self._dims) > 1:\n key,value = self._factors[ax_ndx][fact_ndx]\n else:\n if fact_ndx == 1:\n return ''\n key,value = self._factors[ax_ndx][0]\n return '{} = {}'.format(key,value) if key != '' else ''", "def autolabel(rects):", "def header_choices(self, axis):\n self.cur_axis = axis\n\n # This will hold all the other elements\n self.chooseAxisScreen = FloatLayout()\n\n # Inside of the float layout, we'll have a grid layout\n self.headerButtons = GridLayout(\n cols=2, size_hint_y=0.7, size_hint_x=0.9, \n pos_hint={'x': 0.05, 'top': 0.9})\n self.chooseAxisScreen.add_widget(self.headerButtons)\n\n # and a label which appears when the user doesn't make a selection\n self.axis_missing = Label(\n color = (1.0, .27, 0.0, 1.0), \n pos_hint = {'x': 0.15, 'y': 0.01}, size_hint_y = 0.1, \n size_hint_x = 0.5)\n self.chooseAxisScreen.add_widget(self.axis_missing)\n\n \n # and a label which appears when the user selects a non-numeric\n # axis\n self.non_numeric_axis = Label(\n color = (1.0, .27, 0.0, 1.0), \n pos_hint = {'x': 0.15, 'y': 0.01}, size_hint_y = 0.1, \n size_hint_x = 0.5)\n self.chooseAxisScreen.add_widget(self.non_numeric_axis)\n \n # 
and a label which explains why non-numeric y-axis values\n # are disabled\n # Note: size_hint_y used to be 0.1\n self.y_axis_disabled_label = Label(\n color = (1.0, .27, 0.0, 1.0), \n pos_hint = {'x': 0.15, 'y': 0.001}, \n size_hint_y = 0.3, \n size_hint_x = 0.5,\n )\n self.chooseAxisScreen.add_widget(self.y_axis_disabled_label)\n\n if self.non_numeric_x_axis:\n # and a checkbox that the user may select if they want to\n # count the occurrences of their x-axis\n self.count_x_checkbox = CheckBox(\n size_hint_y = 0.1, size_hint_x = 0.5,\n pos_hint = {'x': .0, 'y': 0.9},\n )\n self.chooseAxisScreen.add_widget(self.count_x_checkbox)\n self.count_x_checkbox.bind(active=self.record_count_checkbox)\n\n self.count_x_label = Label(\n pos_hint = {'x': 0.2, 'y': 0.9},\n text = ('Count the occurrences of my x-axis '\n '\\n instead of using a y-axis'),\n size_hint_y = 0.1, \n size_hint_x = 0.5,\n color = (0.22, 0.67, 0.91, 1))\n self.chooseAxisScreen.add_widget(self.count_x_label)\n\n\n # Set arguments for the Next button on_press\n if (axis == 'x'):\n self.data_needed = 'x'\n self.next_axis = 'y'\n elif (axis == 'y'):\n self.data_needed = 'y'\n self.next_axis = None\n\n # and a \"Next\" button\n # Thanks to https://stackoverflow.com/questions/12368390\n # for help with the lambda\n # Setting on_press is not passing a callback to the button,\n # but actually executing the function.\n # Passing in an unnamed lambda function will call the\n # desired function when the on_press event is raised\n # Thanks to https://stackoverflow.com/questions/16215045\n # for help with the throw-away argument _\n nextButton = Button(\n text = 'Next', size_hint_y=0.15, size_hint_x=0.2, \n pos_hint={'x': 0.79, 'y': 0.01}, \n on_press = lambda _: self.ensureInput(\n self.data_needed, self.axis_missing, self.next_axis))\n self.chooseAxisScreen.add_widget(nextButton)\n\n # We want to disable the buttons which represent non-numerical\n # values, if the user is currently selecting the data to\n # use for their y-axis. 
This disables them and explains to\n # the user why the buttons are disabled.\n with open(self.filename, 'rU+') as f:\n df = pd.read_csv(f, sep=self.delim, index_col=False)\n\n someButtonsDisabled = False\n for header in self.headers:\n btn = Button(text=header)\n if self.cur_axis == 'y':\n if df[header].dtype == 'object':\n btn.disabled = True\n someButtonsDisabled = True\n if someButtonsDisabled:\n self.y_axis_disabled_label.text = self.y_axis_disabled_explanation\n else:\n self.y_axis_disabled_label.text = ''\n btn.bind(on_press=self.assign_header) \n self.headerButtons.add_widget(btn)\n content = self.chooseAxisScreen\n title = 'Select your ' + self.cur_axis + '-axis'\n self.popup = Popup(content=content, title=title, size_hint=(1.0, 1.0))\n self.popup.open()", "def _get_params_from_dict(in_):\n\n idx = np.array(list(in_.keys()))\n d = idx.dtype\n if np.issubdtype(d, np.floating) or np.issubdtype(d, np.integer):\n xticklabels, indexby_str = idx, 'Scalar'\n else:\n xticklabels, indexby_str = [str(i) for i in idx], 'item'\n return xticklabels, indexby_str", "def _categoricals_axes(plotted, feature=None):\n # draw must be called so that axes labels can be manipulated correctly\n draw()\n\n # Define rotation angle\n if feature == \"jobType\":\n rot_angle = 60\n else:\n rot_angle = 36\n\n # Rotate x-axis labels or remove them entirely in the case of companyID\n if feature == \"companyId\":\n plotted.set_xticklabels([])\n else:\n plotted.set_xticklabels(plotted.get_xticklabels(), rotation=rot_angle)\n\n # Add units to salary label\n if plotted.get_ylabel() == \"salary\":\n plotted.set_ylabel(\"Salary / 1000 USD\")", "def get_highly_correlated_feature_names(self):", "def set_labels(self):\n\n if 1 <= self.selected_data <= 2:\n self.plot_select.setLabel(\"left\", \"P (kPa)\")\n self.plot_select.setLabel(\"bottom\", \"t\", \"s\")\n self.plot_zoom.setLabel(\"left\", \"P (kPa)\")\n self.plot_zoom.setLabel(\"bottom\", \"t\", \"s\")\n\n elif self.selected_data == 3:\n self.plot_select.setLabel(\"left\", \"ext\", \"\")\n self.plot_select.setLabel(\"bottom\", \"t\", \"s\")\n self.plot_zoom.setLabel(\"left\", \"ext\", \"\")\n self.plot_zoom.setLabel(\"bottom\", \"t\", \"s\")\n\n elif self.selected_data == 4:\n self.plot_select.setLabel(\"left\", \"U\", \"V\")\n self.plot_select.setLabel(\"bottom\", \"t\", \"s\")\n self.plot_zoom.setLabel(\"left\", \"U\", \"V\")\n self.plot_zoom.setLabel(\"bottom\", \"t\", \"s\")\n\n # self.plot_simulate.setLabel(\"left\", \"ext\", \"\")\n # self.plot_simulate.setLabel(\"bottom\", \"t\", \"s\")\n\n self.plot_distribution.setLabel(\"left\", \"N ×10¹⁰ (#/m³)\")\n self.plot_distribution.setLabel(\"bottom\", \"d_p\", \"m\")\n self.plot_distribution.showGrid(y=True)\n\n self.plot_rotatometer.setLabel(\"left\", \"N ×10¹⁰ (#/m³)\")\n self.plot_rotatometer.setLabel(\"bottom\", \"laimennusvirtaus\")\n self.plot_rotatometer.showGrid(y=True)", "def make_labels(painting):\n labels = {}\n for dcTitleLang, dcTitle in \\\n painting['object']['proxies'][0]['dcTitle'].iteritems():\n labels[dcTitleLang] = {'language': dcTitleLang, 'value': dcTitle[0]}\n return labels", "def fix_facetgrid_axis_labels(facet_grid, shared_in_center=False,\n x=True, y=True) -> None:\n # regarding the choice of shared_in_center: WWMDD?\n if shared_in_center:\n # TODO maybe add a axes over / under the FacetGrid axes, with the same\n # shape, and label that one (i think i did this in my gui or one of the\n # plotting fns. 
maybe plot_traces?)\n raise NotImplementedError\n else:\n for ax in facet_grid.axes.flat:\n if not (ax.is_first_col() and ax.is_last_row()):\n if x:\n ax.set_xlabel('')\n if y:\n ax.set_ylabel('')", "def textfacet(self, filterelem):\n return data.TextFacet(self, filterelem)", "def __init__(self, choices, *args, **kwargs):\n super(RangePollChoiceForm, self).__init__(*args, **kwargs)\n nominees = [(i, '%d' % i) for i in range(0, choices.count()+1)]\n for choice in choices:\n self.fields['range_poll__%s' % str(choice.id)] = (\n forms.ChoiceField(widget=forms.Select(),\n choices=nominees,\n label=choice.nominee.get_full_name()))", "def decision_enum(self, run_idx):\n\n enum_grp = self.decision_grp(run_idx)\n enum = {}\n for decision_name, dset in enum_grp.items():\n enum[decision_name] = dset[()]\n\n return enum", "def labels(self) -> dict:\n raise NotImplementedError", "def test_checkboxtextgroup(self):\r\n self.check_group('checkboxtextgroup', 'choice', 'checkbox')", "def assign_choice_names(self):\r\n\r\n for index, choice in enumerate(self.xml.xpath('//*[@id=$id]//choice',\r\n id=self.xml.get('id'))):\r\n choice.set(\"name\", \"choice_\" + str(index))", "def predicted_label_facet(self):\n return self._predicted_label_facet", "def iter_choices(self):\n\n for pk, obj in self._get_object_list():\n if hasattr(obj, self.id):\n selected = getattr(obj, self.id)\n else:\n selected = obj in self.data\n\n yield (pk, self.get_label(obj), selected)", "def _parse_choices(self, text):\n choices = dict()\n\n matches = re.findall(self.choice_regex, text)\n for match in matches:\n # remove the brackets\n match = match.replace('[[', '')\n match = match.replace(']]', '')\n\n if '|' in match:\n # format is {text}|{node_id}, the text and node id are different\n text, node_id = match.split('|')\n choices[node_id] = text\n else:\n choices[match] = match\n\n return choices", "def enrichmentBoxPlot(labelScheme):\n fig, ax = plt.subplots()\n x1 = range(0, 1600)\n if labelScheme == \"Strict Labeling Successful Compounds\":\n plt.title(\"Box Plot for Triaging Active Compounds\", fontname=\"Times New Roman\", fontsize=13)\n historic_hits = pickle.load(open(\"pickles/historic_hits_conventional_standard.pkl\", \"rb\"))\n ML_hits = pickle.load(open(\"pickles/ML_hits_conventional_standard.pkl\", \"rb\"))\n hist_text_height = 650\n ml_text_height = 400\n if labelScheme == \"Strict Labeling Missed Successful Compounds\":\n plt.title(\"Box Plot for Triaging Active Compounds\\nwith Rank Greater than 40\", fontname=\"Times New Roman\", fontsize=13)\n historic_hits = pickle.load(open(\"pickles/historic_hits_conventional_standard.pkl\", \"rb\"))\n historic_hits = [x for x in historic_hits if x > 40]\n ML_hits = pickle.load(open(\"pickles/ML_hits_conventional_standard.pkl\", \"rb\"))\n ML_hits = [x for x in ML_hits if x > 40]\n hist_text_height = 650\n ml_text_height = 400 \n if labelScheme == \"Strict Labeling Successful Compounds - ML\":\n plt.title(\"Box Plot for Triaging Active Compounds (by ML Standard)\", fontname=\"Times New Roman\", fontsize=13)\n historic_hits = pickle.load(open(\"pickles/historic_hits_ML_standard.pkl\", \"rb\"))\n ML_hits = pickle.load(open(\"pickles/ML_hits_ML_standard.pkl\", \"rb\"))\n hist_text_height = 550\n ml_text_height = 320 \n if labelScheme == \"Strict Labeling Missed Successful Compounds - ML\":\n plt.title(\"Box Plot for Triaging Active Compounds\\nwith Rank Greater than 40 (by ML Standard)\", fontname=\"Times New Roman\", fontsize=13)\n historic_hits = 
pickle.load(open(\"pickles/historic_hits_ML_standard.pkl\", \"rb\"))\n historic_hits = [x for x in historic_hits if x > 40]\n ML_hits = pickle.load(open(\"pickles/ML_hits_ML_standard.pkl\", \"rb\"))\n ML_hits = [x for x in ML_hits if x > 40]\n hist_text_height = 1520\n ml_text_height = 500 \n hist_avg, hist_sample_size, hist_Q3 = np.mean(historic_hits), len(historic_hits), np.quantile(historic_hits, .75)\n ml_avg, ml_sample_size, ml_Q3 = np.mean(ML_hits), len(ML_hits), np.quantile(ML_hits, .75)\n ax.boxplot([historic_hits, ML_hits], widths=(.45, .45))\n ##data labels\n ax.annotate(\"average rank = {:.0f}\\nsample size = {}\".format(hist_avg, hist_sample_size),\n xy=(1, hist_text_height), xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom', fontname=\"Times New Roman\", fontsize=10)\n ax.annotate(\"average rank = {:.0f}\\nsample size = {}\".format(ml_avg, ml_sample_size),\n xy=(2, ml_text_height), xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom', fontname=\"Times New Roman\", fontsize=10)\n ##axis labels for low and high priority\n plt.gcf().text(.11, .88, \"Lowest\\nPriority\", ha='center', fontname=\"Times New Roman\", fontsize=9)\n plt.gcf().text(.11, .1, \"Highest\\nPriority\", ha='center', fontname=\"Times New Roman\", fontsize=9)\n xlabels = [\"null\", \"Conventional Method's Priority Queue\\n(PQC)\", \"ML Method's Priority Queue\\n(PQML)\"]\n ax.set_ylabel(\"Rank in Priority Queue\", fontname=\"Times New Roman\", fontsize=12)\n plt.yticks(fontname=\"Times New Roman\", fontsize=10)\n ax.set_xticklabels(xlabels,fontsize=10, fontname=\"Times New Roman\")\n ax.set_ylim((0,2000))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(2))\n plt.gcf().subplots_adjust(left=.22) #default: left = 0.125, right = 0.9, bottom = 0.1, top = 0.9\n plt.savefig(\"matplotlib_figures/box_plot_enrichment_{}.png\".format(labelScheme), dpi=300)", "def dedup_inner_choices(value_choices: list[ValueChoiceX]) -> dict[str, ParameterSpec]:\n result = {}\n for value_choice in value_choices:\n for choice in value_choice.inner_choices():\n param_spec = ParameterSpec(choice.label, 'choice', choice.candidates, (choice.label, ), True, size=len(choice.candidates))\n if choice.label in result:\n if param_spec != result[choice.label]:\n raise ValueError('Value choice conflict: same label with different candidates: '\n f'{param_spec} vs. 
{result[choice.label]}')\n else:\n result[choice.label] = param_spec\n return result", "def _choice_activation(self):\n support_clusters = group_elements_by_support(self.choices)\n choice_hierarchy={}; initially_active_choices=[]\n if len(support_clusters)>0: #There are choices in the RMPyL program\n initially_active_choices=support_clusters[0]\n for choice_cluster in support_clusters[1:]:\n cluster_support = choice_cluster[0].support\n for c in self.choices:\n if (not c in choice_cluster):\n activates,assig = self._activates(c,cluster_support)\n if activates:\n if c in choice_hierarchy:\n choice_hierarchy[c][(assig.value,assig.negated)]=choice_cluster\n else:\n choice_hierarchy[c]={(assig.value,assig.negated):choice_cluster}\n\n return choice_hierarchy,initially_active_choices", "def get_predefined_labels(self):\n raise NotImplementedError", "def test_init_sets_widget_verbose_name(self):\n for model in (Band, Genre):\n with self.subTest(model=model):\n form = self.form_class(choices={'replacements': model.objects.all()})\n self.assertEqual(\n form.fields['replacements'].widget.verbose_name,\n model._meta.verbose_name_plural\n )", "def test_labels(self):\n self.compliance_tester.test_labels(self.oi)", "def _print_enum_opt(self, option, choices):\n for key in choices:\n if key == self.conf[option]:\n print(\"* %s\" % key)\n else:\n print(\" %s\" % key)", "def subplotLabel(axs):\n for ii, ax in enumerate(axs):\n ax.text(-0.2, 1.2, ascii_uppercase[ii], transform=ax.transAxes, fontsize=16, fontweight=\"bold\", va=\"top\")", "def _save_with_labels(self):\n self.selected_values['with_labels'] = []\n for ww in self.labels_box.children:\n if ww.value:\n self.selected_values['with_labels'].append(\n str(ww.description))", "def select_model_by_facet_value(facet_value : str,root : PosixPath=Path(\"/kbdata/Processed/Models/\")) -> dict:\n models = root.glob(f\"*-{facet_value}.w2v.model\")\n \n out = {}\n for m in models:\n \n start = m.stem.lstrip(\"FT-\").split('-')[0]\n out[int(start)] = m\n \n return out", "def graph_multiple_choice(question_type, question, data_frame, path):\n if (question_type in [\"radio\", \"dropdown\"]):\n # Get this question's data\n column_data = data_frame[question[\"column-name\"]].astype('str')\n\n new_labels = []\n new_counts = []\n for option in question[\"choices\"]:\n new_labels.append(option)\n new_counts.append(column_data.str.count(\"^\" + re.escape(option) + \"$\").sum())\n\n num_items = np.arange(0, len(new_counts))\n\n plt.bar(num_items, new_counts)\n plt.xticks(num_items, new_labels, rotation=\"vertical\")\n plt.tight_layout()\n\n plt.savefig(path + question[\"column-name\"] + '.svg')\n plt.clf()\n plt.close()\n\n if question_type == \"boolean\":\n column_data = data_frame[question[\"column-name\"]].astype('str')\n new_counts = []\n choices = [\"Yes\", \"No\"]\n for option in choices:\n new_counts.append(column_data.str.count(option).sum())\n\n num_items = np.arange(0, len(new_counts))\n\n plt.bar(num_items, new_counts)\n plt.xticks(num_items, choices)\n plt.tight_layout()\n\n plt.savefig(path + question[\"column-name\"] + '.svg')\n plt.clf()\n plt.close()", "def plot_variables(labels, plot, data):\n # Create individual figures\n fig = subplots.make_subplots(rows=1, cols=1)\n for var in labels:\n if plot == 0:\n counts = data[var].value_counts()\n fig.append_trace(go.Bar(x=counts, y=counts.index, orientation='h'), 1, 1)\n elif plot == 1:\n fig.append_trace(ff.create_distplot([list(data[var])], ['distplot'])['data'][0], 1, 1)\n 
fig.append_trace(ff.create_distplot([list(data[var])], ['distplot'])['data'][1], 1, 1)\n elif plot == 2:\n fig.add_trace(go.Box(x=list(data[data[\"Score\"] == \"good\"][var]), name=\"Good\", hoverinfo=\"x\", marker_color='mediumturquoise'))\n fig.add_trace(go.Box(x=list(data[data[\"Score\"] == \"bad\"][var]), name=\"Bad\", hoverinfo=\"x\", marker_color='darkorange'))\n else:\n raise ValueError(\"plot number must be 0, 1, or 2\")\n # Create buttons for drop down menu\n buttons = []\n for i, label in enumerate(labels):\n if plot == 0:\n visibility = [i == j for j in range(len(labels))]\n else:\n visibility = [j//2 == i for j in range(2*len(labels))]\n button = dict(\n label=label,\n method='update',\n args=[{'visible': visibility},\n {'title': label}])\n buttons.append(button)\n updatemenus = list([\n dict(active=-1,\n x=1.06, y=1.27,\n buttons=buttons\n )\n ])\n # Setup layout\n if plot == 0:\n fig['layout']['title'] = \"Distribution of categorical and discrete variables:\"\n fig.update_traces(marker_color='rgb(158,202,225)', marker_line_color='rgb(8,48,107)',\n marker_line_width=1.5, opacity=0.7)\n elif plot == 1:\n fig['layout']['title'] = \"Distribution of continuous variables:\"\n fig.update_traces(marker_color='rgb(112, 125, 188)', opacity=0.8)\n elif plot == 2:\n fig['layout']['title'] = \"Boxplot of continuous variables by score:\"\n fig['layout']['showlegend'] = False\n fig['layout']['updatemenus'] = updatemenus\n iplot(fig, config={\"displayModeBar\": False})", "def build_choices(header, dictionary, after):\n out = f\"{header}\\n\"\n for i, (key, item) in enumerate(dictionary.items(), start=1):\n out += f\"{INDENT_STRING}{i}. {item}\\n\"\n out += after\n return out", "def enable_selected(self, window, values, branch_log_dict, key_event):\n utils.convert_to_numeric(values)\n if(values[key_event] in branch_log_dict[key_event]):#if there is branching for the chosen option\n for element_key in branch_log_dict[key_event][values[key_event]]:\n #values the element can take\n if not isinstance(window[element_key], sg.Text):\n window[element_key].update(disabled = False)\n window[element_key].metadata = True\n window[element_key+\"_label\"].update(text_color = \"#FFFFFF\")#every non-text field has a label\n window[element_key].update(visible = True)", "def visualize(self, lim=False):\n d = {}\n for c_id in list(self.condition_dict.keys()):\n iden = self.condition_dict[c_id][0]['identifier']\n d[iden] = {}\n for k in self.acceptible_keys:\n d[iden][k] = []\n\n dict_keys = {}\n maxIdent = ''\n maxL = 0\n for c_id in list(self.condition_dict.keys()):\n iden = self.condition_dict[c_id][0]['identifier']\n keys = []\n for dct in self.condition_dict[c_id]:\n for k in self.acceptible_keys:\n keys.append(k)\n d[iden][k].append(dct[k])\n dict_keys[iden] = keys\n if len(keys) > maxL:\n maxIdent = iden\n maxL = len(keys)\n\n # Saved for testing purposes\n self.maxL = maxL\n self.maxIdent = maxIdent\n\n # Get variables which were actually changed\n dynamic_vars = []\n for idx, k in enumerate(self.acceptible_keys):\n for c_id in list(self.condition_dict.keys()):\n iden = self.condition_dict[c_id][0]['identifier']\n data = d[iden][k]\n if np.array(data).std() != 0:\n dynamic_vars.append(self.acceptible_keys[idx])\n dynamic_vars = list(set(dynamic_vars))\n for idx, k in enumerate(dynamic_vars):\n fig, axs = plt.subplots()\n for c_id in list(self.condition_dict.keys()):\n iden = self.condition_dict[c_id][0]['identifier']\n data = d[iden][k]\n\n if np.array(data).std() == 0:\n # If no variance in this cell 
sample (likely 'pristine' case)\n axs.axvline(x=data[0], label=iden, lw=5)\n\n else:\n # Circumventing error where only one subplot needs to be defined.\n if idx == 0:\n axs = sns.distplot(\n data, hist=False, rug=True, label=iden)\n else:\n axs = sns.distplot(\n data, hist=False, rug=True, ax=axs, label=iden)\n\n plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n axs.set_xlabel(k)\n plt.show()\n\n for idx, ident in enumerate(list(self.modcells.keys())):\n if idx == 0:\n ax = self.visualize_specific_iv(\n string_identifier=None, module_identifier=ident, substring_identifier=None)\n else:\n ax = self.visualize_specific_iv(\n ax=ax, string_identifier=None, module_identifier=ident, substring_identifier=None)\n plt.title('Module IV curves')\n\n plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n if lim:\n plt.xlim(xmin=0)\n plt.ylim(ymin=0)\n plt.show()\n\n if len(self.string_cond.keys()) > 0:\n for idx, str_key in enumerate(self.string_cond):\n if idx == 0:\n ax = self.visualize_specific_iv(\n string_identifier=str_key, module_identifier=None, substring_identifier=None)\n else:\n ax = self.visualize_specific_iv(\n ax=ax, string_identifier=str_key, module_identifier=None, substring_identifier=None)\n plt.title('String IV curves')\n plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n if lim:\n plt.xlim(xmin=0)\n plt.ylim(ymin=0)\n plt.show()", "def update_choice(self, value):\n if self.p is not None:\n if value == \"none\":\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', \"\")\n if value == \"categories\":\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.categories)\n if value == \"pageid\":\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.pageid)\n if value == \"sections\":\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.sections)\n if value == \"html\":\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.html())", "def test_favourite_labels(self):\n\n fav = Favourite.objects.get(id=1)\n # label former_barcode\n label_name = fav._meta.get_field('former_barcode').name\n self.assertEqual(label_name, 'former_barcode')\n # label favourite_barcode\n label_name = fav._meta.get_field('favourite_barcode').name\n self.assertEqual(label_name, 'favourite_barcode')\n # label email_user\n label_name = fav._meta.get_field('email_user').name\n self.assertEqual(label_name, 'email_user')", "def _selected_labels_changed(self, name, old, new):\n if self.value_lock.acquire(False):\n try:\n self.value = [self._options_dict[name] for name in new]\n finally:\n self.value_lock.release()", "def vggface2_labels(self):\n id_meta = pd.read_csv(\"loki/static/models/vggface2/identity_meta.csv\",\n sep=\"\\n\")\n id_meta = id_meta[\n 'Class_ID, Name, Sample_Num, Flag, Gender'].str\\\n .split(',', expand=True)\n\n id_meta.columns = [\n 'Class_ID', 'Name', 'Sample_Num', 'Flag', 'Gender', 'None']\n id_meta.drop(columns=['None'], inplace=True)\n\n vgg_names = id_meta.drop(columns=[\n 'Sample_Num', 'Flag', 'Gender']).set_index('Class_ID')\n\n return vgg_names", "def study_factors(self):\n result = dict()\n for i in list(self.xml_tree.iter('Factor')):\n # print(i.attrib['Name'])\n sf_name = i.attrib['Name']\n if sf_name not in result:\n result[sf_name] = []\n for j in list(i.iter('FactorOption')):\n result[sf_name].append(j.attrib['Value'])\n\n return result", "def _get_labels(self, label_vector):\n return () if label_vector is None else \\\n list(OrderedDict.fromkeys([label 
for term in label_vector \\\n for label, power in term if power != 0]))" ]
[ "0.72828305", "0.6952813", "0.6830777", "0.6303037", "0.6230736", "0.60396737", "0.58811235", "0.56370056", "0.56334686", "0.5489322", "0.54117686", "0.53088254", "0.5206354", "0.5197836", "0.5150402", "0.5128234", "0.51101923", "0.50881493", "0.5084556", "0.50771785", "0.50643694", "0.5034793", "0.49991843", "0.49789056", "0.49783692", "0.49746746", "0.49715713", "0.49363285", "0.49324012", "0.49252543", "0.48968822", "0.4892259", "0.48719642", "0.4859144", "0.48444337", "0.4815168", "0.48107427", "0.4799982", "0.4794327", "0.47911736", "0.47898206", "0.4772612", "0.47676525", "0.47583675", "0.4758102", "0.475525", "0.47439882", "0.4735896", "0.47339922", "0.47262132", "0.47222498", "0.47190747", "0.47045082", "0.4701368", "0.4695264", "0.469303", "0.46893433", "0.46843675", "0.46806464", "0.46803674", "0.4669035", "0.46677482", "0.46672535", "0.46665475", "0.4665734", "0.46556875", "0.46526927", "0.46460825", "0.4644654", "0.46401814", "0.46382558", "0.46313873", "0.46299228", "0.46202275", "0.461975", "0.46144772", "0.4613953", "0.46130875", "0.4600849", "0.4597814", "0.4581767", "0.45773005", "0.45769247", "0.4569199", "0.4567383", "0.45652038", "0.4562312", "0.4561176", "0.45464054", "0.45446214", "0.4543757", "0.454012", "0.45353326", "0.45338655", "0.45331615", "0.4530664", "0.45078668", "0.45078424", "0.45058268", "0.45053288" ]
0.69385916
2
Should add an error if query is empty and sort is relevance
def test_clean(self):
    form = DocumentSearchForm()
    form.cleaned_data = {"q": "", "sort": "relevance"}
    form.clean()
    assert len(form.errors) == 1
    # Otherwise should not raise an error
    form = DocumentSearchForm()
    form.cleaned_data = {"q": "test", "sort": "relevance"}
    form.clean()
    assert len(form.errors) == 0
    form = DocumentSearchForm()
    form.cleaned_data = {"q": "", "sort": "scholarship_desc"}
    form.clean()
    assert len(form.errors) == 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_sortby_invalid(self):\n qs = {'a': 1, 'w': 4, 'format': 'json', 'sortby': ''}\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(200, response.status_code)", "def test_scroll_query_sort_safe(self):\n self._validate_scroll_search_params({\"sort\": \"_id\"}, {\"sort\": \"_id\"})", "def test_invalid_sort_by_similarity(self):\n\n # no filter by similarity but order by similarity\n query = [{\n \"object_name\": \"Assessment\",\n \"order_by\": [{\"name\": \"__similarity__\"}],\n \"filters\": {\"expression\": {}},\n }]\n\n self.assert400(self.client.post(\n \"/query\",\n data=json.dumps(query),\n headers={\"Content-Type\": \"application/json\"},\n ))\n\n # filter by similarity in one query and order by similarity in another\n query = [\n {\n \"object_name\": \"Assessment\",\n \"filters\": {\n \"expression\": {\n \"op\": {\"name\": \"similar\"},\n \"object_name\": \"Assessment\",\n \"ids\": [1],\n },\n },\n },\n {\n \"object_name\": \"Assessment\",\n \"order_by\": [{\"name\": \"__similarity__\"}],\n \"filters\": {\"expression\": {}},\n },\n ]\n\n self.assert400(self.client.post(\n \"/query\",\n data=json.dumps(query),\n headers={\"Content-Type\": \"application/json\"},\n ))", "def test_invalid_search_query(aquarius_instance):\n search_query = dict()\n search_query[\"sort\"] = \"foo_sort\"\n with pytest.raises(ValueError):\n aquarius_instance.query_search(search_query=search_query, sort=\"foo_sort\")", "def test_sort(self):\n sort_field = MoveSearchForm.sort\n for value, label in sort_field.kwargs['choices']:\n response = self.do_search(id=u'1', sort=value)\n self.assert_(\n response.tmpl_context.results,\n \"\"\"Sort by {0} doesn't crash\"\"\".format(value)\n )", "def test_exception_invalid_sort_order(self):\n self.assertRaises(ValueError, self.conn.query, \"id:\" + \"abc\",\n **{\"sort\":\"id\", \"sort_order\":\"invalid_sort_order\"})", "def get_sort_query(self, kind, order, is_number):\n pass", "def test_wrong_search_criteria(self):\n # Name Decreasing sorting criteria is selected\n sorting_criteria = \"surname_decrease\"\n # Search string that returns all of the users\n search_string = \"''\"\n # Search For all users\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string,\"sorting_criteria\":sorting_criteria})\n search_result = json.loads(resp.content)\n self.assertEqual(json.loads(resp.content),\"You give your input in wrong format. 
Please check the API documentation for the appropriate input format!!\",\"Sorting Critera Input Control Doesn't Work\")", "def test_query_sort_default_sort_order(self):\n doc_count = 10\n field_to_be_sorted_by = \"data\"\n prefix = get_rand_string()\n\n data = [prefix + \"-\" + str(x) for x in range(10)]\n\n # Same user_id for all documents\n user_id = get_rand_string()\n\n for datum in data:\n self.conn.add(id=get_rand_string(), user_id=user_id, data=datum)\n self.conn.commit()\n\n results = self.conn.query(q=\"user_id:\" + user_id, sort=\"data\").results\n\n self.assertEquals(len(results), doc_count,\n \"There should be %d documents returned, got:%d, results:%s\" % (\n doc_count, len(results), results))\n\n query_data = [doc[\"data\"] for doc in results]\n\n for idx, datum in enumerate(sorted(data)):\n self.assertEquals(datum, query_data[idx],\n \"Expected %s instead of %s on position %s in query_data:%s\" % (\n datum, query_data[idx], idx, query_data))", "def data_missing_for_sorting() -> NoReturn:\n raise NotImplementedError", "def data_missing_for_sorting() -> NoReturn:\n raise NotImplementedError", "def test_query_sort_nondefault_sort_order(self):\n doc_count = 10\n field_to_be_sorted_by = \"data\"\n prefix = get_rand_string()\n\n data = [prefix + \"-\" + str(x) for x in range(10)]\n\n # Same user_id for all documents\n user_id = get_rand_string()\n\n for datum in data:\n self.conn.add(id=get_rand_string(), user_id=user_id, data=datum)\n self.conn.commit()\n\n results = self.conn.query(q=\"user_id:\" + user_id, sort=\"data\",\n sort_order=\"desc\").results\n\n self.assertEquals(len(results), doc_count,\n \"There should be %d documents returned, got:%d, results:%s\" % (\n doc_count, len(results), results))\n\n query_data = [doc[\"data\"] for doc in results]\n\n for idx, datum in enumerate(reversed(sorted(data))):\n self.assertEquals(datum, query_data[idx],\n \"Expected %s instead of %s on position %s in query_data:%s\" % (\n datum, query_data[idx], idx, query_data))", "def on_sort_complete(self):\n if self.get_search_active_thread_count() == 0:\n self.clear_widgets()", "def sort_results(self):\n pass", "def test_invalid_search_order(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?dir=bar\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"Query parameter 'dir' must be one of ['b', 'f']\",\n channel.json_body[\"error\"],\n )", "def __init__(self, data=None, *args, **kwargs):\n super().__init__(data=data, *args, **kwargs)\n\n # if a keyword search term is not present, relevance sort is disabled\n if not data or not data.get(\"q\", None):\n self.fields[\"sort\"].widget.choices[0] = (\n self.SORT_CHOICES[0][0],\n {\"label\": self.SORT_CHOICES[0][1], \"disabled\": True},\n )", "def test_vlv_without_sort_order(self):\n search_dn = \"ou=nerdherd,%s\" % self.basedn\n self.assertRaises(bonsai.UnwillingToPerform,\n lambda: self.conn.search(search_dn, 1,\n attrlist=['uidNumber'],\n offset=1, before_count=1,\n after_count=2,\n est_list_count=6))", "def test_search_table_fail_if_no_query(self) -> None:\n with local_app.test_client() as test:\n response = test.get('/api/search/v0/table', query_string=dict(page_index='0'))\n self.assertEqual(response.status_code, HTTPStatus.INTERNAL_SERVER_ERROR)", "def test_invalid_text_search(aquarius_instance):\n text = \"foo_text\"\n with pytest.raises(ValueError):\n 
aquarius_instance.text_search(text=text, sort=\"foo_sort\")", "def test_scroll_no_searchtype_scan(self):\n self._validate_scroll_search_params({}, {\"sort\": \"_doc\"})", "def search(self, query, maxhits=100):", "def test_search_user_fail_if_no_query(self) -> None:\n with local_app.test_client() as test:\n response = test.get('/api/search/v0/user', query_string=dict(page_index='0'))\n self.assertEqual(response.status_code, HTTPStatus.INTERNAL_SERVER_ERROR)", "def search(self, query):", "def _validate_field(param, fields):\n\n if param.field not in fields:\n raise InvalidQueryParams(**{\n 'detail': 'The sort query param value of \"%s\" is '\n 'invalid. That field does not exist on the '\n 'resource being requested.' % param.raw_field,\n 'links': LINK,\n 'parameter': PARAM,\n })", "def validate_sort_order(filter, main_field):\n\n # The tiebreaker fields are always in the same order, but\n # if the main sort field is one of the tiebreaker fields,\n # it's removed from the list -- there's no need to sort on\n # that field a second time.\n default_sort_fields = [\n {x: \"asc\"} for x in ['sort_author', 'sort_title', 'work_id']\n if x != main_field\n ]\n assert default_sort_fields == filter.sort_order[1:]\n return filter.sort_order[0]", "def test_hits_should_be_correctly_sorted_on_title(\n self, client: Flask, docker_service, api, wait_for_information_models\n ):\n search_str = \"di\"\n body = {\"q\": search_str, \"size\": 300}\n result = client.post(informationmodel_url, json=body)\n assert result.status_code == 200\n result_json = result.json\n last_was_exact_match = True\n last_was_partial_match_in_title = False\n for hit in result_json[\"hits\"]:\n if has_exact_match_in_title(hit, search_str):\n assert last_was_exact_match\n last_was_exact_match = True\n elif has_partial_match_in_title(hit, search_str):\n assert last_was_exact_match or last_was_partial_match_in_title\n last_was_exact_match = False\n last_was_partial_match_in_title = True\n else:\n last_was_exact_match = False\n last_was_partial_match_in_title = False", "async def test_batch_list_with_bad_sort(self):\n self.stream.preset_response(self.status.INVALID_SORT)\n response = await self.get_assert_status('/transactions?sort=bad', 400)\n\n self.assert_has_valid_error(response, 57)", "def test_get_search_query_ordering(self):\n user_config = {\"weighted_display_name_like\": \"[Modernisation]\"}\n\n module, _, _ = create_user_directory_search_module_with_config(user_config)\n\n # Check postgres\n\n # Check the generated SQL and arguments of the above config when using postgres\n sql, args = module.get_search_query_ordering(PostgresEngine)\n\n # We don't care too much about the specifics of the SQL, just that our injected\n # CASE is present\n self.assertIn(\"display_name like ?\", sql.lower())\n\n # Check that the returned arguments match our config\n expected_args = (\"%\" + user_config[\"weighted_display_name_like\"] + \"%\",)\n self.assertEqual(args, expected_args)\n\n # Check sqlite\n\n # Check the generated SQL and arguments of the above config when using postgres\n sql, args = module.get_search_query_ordering(Sqlite3Engine)\n\n # We don't do anything different from Synapse's default SQL\n self.assertGreater(len(sql), 0)\n\n # Nor do we return any extra arguments\n expected_args = ()\n self.assertEqual(args, expected_args)", "def post_sort(self, qs):\n return qs", "def process_query(query):\n tmp= auto_fill_id_map[query]\n index = character_to_index_map[tmp]\n row = scores[index]\n\n t = []\n for i, v in enumerate(row):\n if v < 
0:\n # do not add it this score (either from same movie or same character)\n continue\n t.append((v, i))\n\n t = sorted(t)\n indexes = [x[1] for x in t]\n results = []\n for i in reversed(indexes):\n results.append(data[i])\n\n # return results in sorted order\n return results[:20]", "def _validate_no_rels(param, rels):\n\n if param.field in rels:\n raise InvalidQueryParams(**{\n 'detail': 'The sort query param value of \"%s\" is not '\n 'supported. Sorting on relationships is not '\n 'currently supported' % param.raw_field,\n 'links': LINK,\n 'parameter': PARAM,\n })", "def test_search_nolocation_noquery(objectsearch):\n with pytest.raises(InvalidSearchParameterError):\n objectsearch.search(location=None, query=None)", "def validate_admin_sort_by(sort_on):\n try:\n sort_attributes = ['title', 'md_pub_date', 'summary']\n if sort_on in sort_attributes:\n return sort_on\n else:\n return 'title'\n except Exception as e:\n print \"Exception: \" + str(e)", "def validate_query_search(self, **kwargs):\n if \"query\" not in kwargs:\n raise KeyError(\"Must pass: query = 'search terms'\")\n if \"language\" not in kwargs:\n kwargs[\"language\"] = [\"en\"]\n\n return self.request(verb=requests.get, address=\"query-validation\", params=kwargs)", "def test_scroll_query_extended(self):\n self._validate_scroll_search_params({\"_id\": \"abc\"},\n {\"_id\": \"abc\", \"sort\": \"_doc\"})", "def pre_sort(self, qs):\n return qs", "def __generate_search_query(self) -> None:\n if self.query_accuracy < 100:\n if self.title is not None and self.title != '' and self.artist is not None and self.artist != '':\n # Use the title and the artist name to find more information about the song.\n query: str = self.title + ' ' + self.artist\n query = re.sub(self.__get_filter_regex(), '', query)\n self.query = query\n # Remove unnecessary information in order to get a simpler query version.\n self.minimal_query = re.sub(r'\\([\\s\\S]+\\)', '', query).strip()\n self.query_accuracy = 100\n return\n if self.query_accuracy < 50:\n # No title nor artist name available, use the filename as search query.\n filename: str = os.path.basename(self.original_path)\n filename = os.path.splitext(filename)[0]\n query: str = filename.lower()\n query = re.sub(self.__get_filter_regex(), '', query)\n query = query.replace('_', ' ')\n query = query.strip()\n self.query = query\n self.minimal_query = re.sub(r'\\([\\s\\S]+\\)', '', query).strip()\n self.query_accuracy = 50", "def test_non_existent_term_search(self):\n\n expected_results = []\n results = self.searcher.search(\"asdasdasdas\")\n\n self.assertListEqual(results, expected_results)", "def test_sorting_name2(self):\n # Name Decreasing sorting criteria is selected\n sorting_criteria = \"name_increasing\"\n # Search string that returns all of the users\n search_string = \"''\"\n # Search For all users\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string,\"sorting_criteria\":sorting_criteria})\n search_result = json.loads(resp.content)\n for i in range(len(search_result)-1):\n self.assertLessEqual(search_result[i][\"name\"],search_result[i+1][\"name\"],\"{} sorting criteria doesn't work\".format(sorting_criteria))", "def sort_results(self, sort_option):\r\n self.model.sort_data(sort_option)", "def __prepare_query(self, query, stopwords=[], stemming_func=None):\n pass", "def _validate_query(query):\n\n if len(query.kind) != 1:\n raise ValueError('Query must have exactly one kind.')\n\n if query.order:\n raise ValueError('Query cannot 
have any sort orders.')\n\n if query.HasField('limit'):\n raise ValueError('Query cannot have a limit set.')\n\n if query.offset > 0:\n raise ValueError('Query cannot have an offset set.')\n\n _validate_filter(query.filter)", "def test_sorting_name(self):\n # Name Decreasing sorting criteria is selected\n sorting_criteria = \"name_decreasing\"\n # Search string that returns all of the users\n search_string = \"''\"\n # Search For all users\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string,\"sorting_criteria\":sorting_criteria})\n search_result = json.loads(resp.content)\n for i in range(len(search_result)-1):\n self.assertGreaterEqual(search_result[i][\"name\"],search_result[i+1][\"name\"],\"{} sorting criteria doesn't work\".format(sorting_criteria))", "def test_missing_sort_value():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n BAR bigfoot X classification SORT Y classification COUNT\n \"\"\"\n\n with pytest.raises(SvlMissingValue):\n parse_svl(svl_string)", "def test_validate_search_no_query():\n result = validate_search_payload(None)\n assert result[\"errors\"] == \"No query parameter received.\"", "def test_query_q(client, example_records, h, prefix):\n # Test query (q=)\n res = client.get(f'{prefix}?q=title.en:text', headers=h)\n assert res.status_code == 200\n assert res.json[\"hits\"][\"total\"] == 1\n assert res.json['sortBy'] == 'bestmatch'\n\n # Test sort\n res = client.get(f'{prefix}?q=*&sort=bestmatch', headers=h)\n assert res.status_code == 200\n assert res.json['sortBy'] == 'bestmatch'\n\n # Test size\n res = client.get(f'{prefix}?size=1&page=1', headers=h)\n assert res.status_code == 200\n assert res.json['hits']['total'] == 2\n assert len(res.json['hits']['hits']) == 1\n assert 'next' in res.json['links']", "def test_search_no_params_error(self):\n self.assertRaises(\n TypeError,\n lambda: self.t['Scrubs'].search()\n )", "def sort_query(query, *args):\n\n return QuerySorter()(query, *args)", "def test_030_query_nothing(self):\n\n testflow.step(\"Querying for nothing\")\n assert not self.query_cli.run()[0], \"Invalid arguments of query passed\"", "def sortby(self):\n ...", "def test_analyze_a_recipe_search_query(self):\n pass", "def test_valid_search_order(self) -> None:\n\n # fetch the most recent first, largest timestamp\n channel = self.make_request(\n \"GET\",\n self.url + \"?dir=b\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 20)\n report = 1\n while report < len(channel.json_body[\"event_reports\"]):\n self.assertGreaterEqual(\n channel.json_body[\"event_reports\"][report - 1][\"received_ts\"],\n channel.json_body[\"event_reports\"][report][\"received_ts\"],\n )\n report += 1\n\n # fetch the oldest first, smallest timestamp\n channel = self.make_request(\n \"GET\",\n self.url + \"?dir=f\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 20)\n report = 1\n while report < len(channel.json_body[\"event_reports\"]):\n self.assertLessEqual(\n channel.json_body[\"event_reports\"][report - 1][\"received_ts\"],\n channel.json_body[\"event_reports\"][report][\"received_ts\"],\n )\n report += 1", "def test_sort_distributions_invalid_input(self):\r\n # Unfortunately, 
this code doesn't support the brosort algorithm... :(\r\n with self.assertRaises(ValueError):\r\n _ = _sort_distributions([[1, 2, 3], [3, 2, 1]], ['foo', 'bar'],\r\n ['r', 'b'], 'brosort')", "def operator(self, sort):\r\n return None", "def _sort_dataframe(self, dataframe):\r\n print('Not allowed')", "def test_empty_query_search(self):\n assert UserProfile.search('').count()", "def test_sorting_surname2(self):\n # Name Decreasing sorting criteria is selected\n sorting_criteria = \"surname_decreasing\"\n # Search string that returns all of the users\n search_string = \"''\"\n # Search For all users\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string,\"sorting_criteria\":sorting_criteria})\n search_result = json.loads(resp.content)\n for i in range(len(search_result)-1):\n self.assertGreaterEqual(search_result[i][\"name\"],search_result[i+1][\"name\"],\"{} sorting criteria doesn't work\".format(sorting_criteria))", "def test_get_entities_note_no_filter_default_sort(app):\n with app.app_context():\n notes = get_entities(Note, 1, 5)\n assert notes.page == 1\n assert notes.per_page == 5\n assert 'ORDER BY notes.id ASC' in str(notes.query.statement)\n assert notes.query.whereclause is None", "def test_empty_search_query_returns_empty_queryset(self):\n article = ArticleFactory()\n article.publish()\n request = RequestFactory().get(\"\", {\"q\": \"\"})\n response = Search.as_view()(request)\n self.assertIn(\"results\", response.context_data)\n results = response.context_data[\"results\"]\n self.assertEqual(len(results), 0)", "def term_sort(want_query,not_query,inv_ind):\n \n# want_toks = tokenize_func(want_query)\n# not_toks = tokenize_func(not_query)\n \n wants = []\n for tok in want_query:\n if tok in inv_ind:\n l = len(inv_ind[tok])\n wants.append((tok,l))\n wants.sort(key = lambda x: x[1]) \n \n nots = []\n for tok in not_query:\n if tok in inv_ind:\n l = len(inv_ind[tok])\n nots.append((tok,l))\n nots.sort(key = lambda x: x[1])\n \n return wants,nots", "def test_paginate_no_inputs():\n result = search_paginate()\n assert result == (0, 50)", "def test_display_search_with_field_with_empty_tuple():\n result_tuple = ()\n result = display.display_search(result_tuple)\n\n assert result == \"No responses to list\"", "def test_get_entities_user_no_filter_default_sort(app):\n with app.app_context():\n users = get_entities(User, 1, 5)\n assert users.page == 1\n assert users.per_page == 5\n assert 'ORDER BY users.id ASC' in str(users.query.statement)\n assert users.query.whereclause is None", "def sort():\n return -1", "def get_query(self,q,request):\n kwargs = { \"%s__icontains\" % search_field : q }\n return model.objects.filter(**kwargs).order_by(search_field)", "def validate_lookup_search_term_format(search_query):\n if len(search_query) != 0:\n clear()\n return True\n\n else:\n clear()\n return False", "def get(self, request, *args, **kwargs):\n search_query = request.GET.get('q', None)\n if search_query is not None:\n if self.request.user.is_authenticated:\n self.queryset = Question.objects.filter(\n title__icontains=search_query).order_by('-rating')\n else:\n self.queryset = Question.objects.filter(title__icontains=search_query).order_by('-rating')\n if not self.queryset:\n messages.error(request, f'No results found for {search_query}')\n\n return super().get(request, *args, **kwargs)", "def validate_rule_search(self, **kwargs):\n if \"query\" not in kwargs:\n raise KeyError(\"Must pass: query = 'search terms'\")\n if \"language\" not in 
kwargs:\n kwargs[\"language\"] = [\"en\"]\n\n return self.request(verb=requests.get, address=\"query-validation/searchwithin\", params=kwargs)", "def search_bad_query(error):\n current_app.logger.debug(str(error))\n return render_template(\"search.html\", query=error.query, error=error)", "def __score(self, name, summary):\n score = 0\n for queryTerm in self.__query:\n if queryTerm.lower() in name.lower():\n score += 4\n if queryTerm.lower() == name.lower():\n score += 4\n \n if queryTerm.lower() in summary.lower():\n if QRegExp(r'\\b{0}\\b'.format(QRegExp.escape(queryTerm)),\n Qt.CaseInsensitive).indexIn(summary) != -1:\n # word match gets even higher score\n score += 2\n else:\n score += 1\n \n return score", "def order_log_results(self, log_search_order):\n raise errors.Unimplemented()", "def test_order(self):\n\n # issue a valid query\n # Assure proper execution, and get results from quilt_history\n o = str(quilt_test_core.call_quilt_script('quilt_submit.py', [\n '-y', 'out_of_order']))\n\n o = self.check_query_and_get_results3(o)\n\n # Check results\n # assure that results are in order\n l = []\n for i in xrange(1, 6):\n searchStr = \"{'timestamp': \" + str(i) + '}'\n index = o.find(searchStr)\n logging.debug(\"looking for string: \" + searchStr)\n self.assertTrue(index != -1)\n l.append(index)\n\n isSorted = all(l[i] <= l[i + 1] for i in xrange(len(l) - 1))\n self.assertTrue(isSorted)", "def test_order_by(self, http_query, cc_query):\n threads = [make_minimal_cs_thread()]\n self.register_get_user_response(self.user)\n self.register_get_threads_response(threads, page=1, num_pages=1)\n self.client.get(\n self.url,\n {\n \"course_id\": str(self.course.id),\n \"order_by\": http_query,\n }\n )\n self.assert_last_query_params({\n \"user_id\": [str(self.user.id)],\n \"course_id\": [str(self.course.id)],\n \"page\": [\"1\"],\n \"per_page\": [\"10\"],\n \"sort_key\": [cc_query],\n })", "def ask_user_for_relevance(query_results):\n for i, result in enumerate(query_results):\n hdr = 'Result #%d ' % (i+1)\n prompt_text = 'Is result #%d relevant? 
[y/n] ' % (i+1)\n print '\\n' + hdr + '-'*(70 - len(hdr))\n print result.to_formatted_string()\n print '-'*70\n while True:\n user_in = raw_input(prompt_text).strip().lower()\n if user_in == 'y' or user_in == 'n':\n break\n if user_in == 'y':\n result.is_relevant = True", "def test_agencies_search_none(self):\n query = \"kjlasdhfjhsdfljsdhflkasdjh\"\n response = self.client.get(reverse('agencies') + \"?query=\" + query)\n self.assertEqual(response.status_code, 200)\n\n content = response.content.decode('utf-8')\n self.assertTrue('Department of Homeland Security' not in content)\n self.assertTrue('Department of Commerce' not in content)\n self.assertTrue('Patent and Trademark Office' not in content)\n self.assertTrue('no agencies matching your search' in content)", "def is_valid_query(query):\n\n if query is None:\n return False\n\n if len(query) == 0:\n return False\n\n return True", "def test_search_page_no_DB_results(self, mock_api, mock_search):\n mock_search.side_effect = [LookupError, LookupError]\n response = self.client.post(\"/search/\", {\"query\": \"test\"})\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"search/base.html\")\n self.assertTemplateUsed(response, \"search/search_form.html\")\n self.assertTemplateUsed(response, \"search/results_page.html\")\n self.assertTemplateNotUsed(response, \"search/result.html\")\n self.assertEqual(response.context[\"error\"],\n \"Votre recherche n'a donné aucun résultats\")", "def query(self, query):", "def test_query_no_score(self):\n id = get_rand_string()\n\n # Same data and user_id\n user_id = data = get_rand_string()\n\n self.conn.add(id=id, user_id=user_id, data=data)\n self.conn.commit()\n\n results = self.conn.query(\"id:\" + id, score=False).results\n\n self.assertEquals(len(results), 1,\n \"No documents fetched, expected id:%s\" % (id))\n\n doc = results[0]\n\n self.assertTrue(\"score\" not in doc,\n \"No score should be returned, doc:%s\" % repr(doc))", "def order_log_entry_results(self, log_entry_search_order):\n raise errors.Unimplemented()", "def is_sorted_queryname(header):\n\tif(\"HD\" in header):\n\t\tif(\"SO\" in header[\"HD\"]):\n\t\t\tif(header[\"HD\"][\"SO\"] == \"queryname\"):\n\t\t\t\treturn True\n\treturn False", "def admin_search_student_query(request):\n\n if not validate_request(request): return redirect(reverse(URL_FORBIDDEN))\n if request.session['type'] == 'S' or request.session['type'] == 'R': return redirect(reverse(URL_FORBIDDEN))\n\n if request.method == \"POST\":\n first_name = request.POST['first_name']\n last_name = request.POST['last_name']\n email = request.POST['email']\n type = request.POST['type']\n dict = {}\n\n for user in User.objects.all():\n user_type = _get_user_type(user)\n\n if user_type is None or user_type == 'A':\n continue # for user who are not S, G, F, D, R, A\n\n user_first_name = None\n user_last_name = None\n user_email = None\n \n votes = 0\n\n if user_type == type:\n votes += 2\n\n if user_type == 'S':\n user_first_name = user.student.first_name\n user_last_name = user.student.last_name\n user_email = user.student.email\n elif user_type == 'G' or user_type == 'D':\n user_first_name = user.faculty.first_name\n user_last_name = user.faculty.last_name\n user_email = user.faculty.email\n elif user_type == 'R':\n user_first_name = user.first_name\n user_last_name = user.last_name\n user_email = user.email\n\n if first_name.upper() in user_first_name.upper():\n votes += 1\n\n if last_name.upper() in user_last_name.upper():\n votes += 1\n\n if 
email.upper() in user_email.upper():\n votes += 1\n\n dict[user] = votes\n \n sorted_results = sorted(dict.items(), key = operator.itemgetter(1))\n sorted_results.reverse()\n result = _clean_user_info_results(sorted_results)\n\n return HttpResponse(json.dumps(result), content_type = 'application/json')\n else:\n return redirect(reverse(URL_BAD_REQUEST))", "def test_queryUnkeywordFlag(self):\n self._keywordFilteringTest(\"unkeyword\")", "def abort_search(self):\n self._raise_not_supported()", "def test_get_total_no_inputs():\n result = search_paginate()\n assert result == (0, 50)", "def test_search_table_fail_if_no_page_index(self) -> None:\n with local_app.test_client() as test:\n response = test.get('/api/search/v0/table', query_string=dict(query='test'))\n self.assertEqual(response.status_code, HTTPStatus.INTERNAL_SERVER_ERROR)", "def test_invalid_values(self):\n self.assertEqual(dictsort([1, 2, 3], \"age\"), \"\")\n self.assertEqual(dictsort(\"Hello!\", \"age\"), \"\")\n self.assertEqual(dictsort({\"a\": 1}, \"age\"), \"\")\n self.assertEqual(dictsort(1, \"age\"), \"\")", "def test_sorting_surname(self):\n # Name Decreasing sorting criteria is selected\n sorting_criteria = \"surname_increasing\"\n # Search string that returns all of the users\n search_string = \"''\"\n # Search For all users\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string,\"sorting_criteria\":sorting_criteria})\n search_result = json.loads(resp.content)\n for i in range(len(search_result)-1):\n self.assertLessEqual(search_result[i][\"name\"],search_result[i+1][\"name\"],\"{} sorting criteria doesn't work\".format(sorting_criteria))", "def orderby():\n pass", "def search(request):\n\n filterList = ['everyone', 'black', 'hispanic', 'female', 'lgbt', 'immigrants', 'disabled', 'poor']\n sorts = {\"Title(A-Z)\":\"title\", \"Title(Z-A)\":\"-title\", \"Value Increasing\":\"value\", \"Value Decreasing\":\"-value\", \"Due Date\":\"dueDate\"}\n\n #urls agruments\n query = request.GET.get('query', '')\n Filter = request.GET.get('filter', 'Default')\n sortby = request.GET.get('sortby', 'Default')\n pageNum = request.GET.get('page', '1')\n \n postResult = Post.objects.all().order_by(sorts.get(sortby,\"UUID\"))\n\n if(query):\n #search for words in the title, description or organizaton name, then filter out the tags\n postResult = Post.objects.filter((Q(title__icontains=query)|Q(description__icontains=query))|Q(organization__username__icontains=query))\n if(Filter !='Default' and Filter.lower() in filterList): \n #check if the filter the user entered is a tag\n postResult = postResult.filter(tags__name__in=[Filter.lower()]) \n\n #function to help divide our query into groups of 12 for pagination\n pageObj = Paginator(postResult, 12)\n\n #select the page that we want\n page = pageObj.page(pageNum)\n\n\n context = {\n 'result': postResult,\n 'page_obj': page,\n 'query': query,\n 'filter': Filter,\n 'sortby': sortby,\n }\n return render(request, 'search.html', context=context)", "def test_search_query_wrongtype(objectsearch):\n with pytest.raises(InvalidSearchParameterError):\n objectsearch.search(query='computer says no')", "def test_existent_term_search(self):\n results = self.searcher.search(\"coach\")\n expected_results = 3\n\n self.assertEqual(results[0].indexable.docid, expected_results)", "def validate_doi_view_sort_by(sort_on):\n try:\n sort_attributes = ['title', 'md_pub_date', 'summary', 'assigned_doi_ark']\n if sort_on in sort_attributes:\n return sort_on\n 
else:\n return 'title'\n except Exception as e:\n print \"Exception: \" + str(e)", "def test_sortby_documents_helpful(self):\n r1 = RevisionFactory(is_approved=True)\n r2 = RevisionFactory(is_approved=True)\n HelpfulVoteFactory(revision=r2, helpful=True)\n\n # Note: We have to wipe and rebuild the index because new\n # helpful_votes don't update the index data.\n self.setup_indexes()\n self.reindex_and_refresh()\n\n # r2.document should come first with 1 vote.\n response = self.client.get(reverse('search.advanced'), {\n 'w': '1', 'a': '1', 'sortby_documents': 'helpful',\n 'format': 'json'\n })\n eq_(200, response.status_code)\n\n content = json.loads(response.content)\n eq_(r2.document.title, content['results'][0]['title'])\n\n # Vote twice on r1, now it should come first.\n HelpfulVoteFactory(revision=r1, helpful=True)\n HelpfulVoteFactory(revision=r1, helpful=True)\n\n self.setup_indexes()\n self.reindex_and_refresh()\n\n response = self.client.get(reverse('search.advanced'), {\n 'w': '1', 'a': '1', 'sortby_documents': 'helpful',\n 'format': 'json'})\n eq_(200, response.status_code)\n\n content = json.loads(response.content)\n eq_(r1.document.title, content['results'][0]['title'])", "def test_invalid_args(self):\n self.assertEqual(dictsort([{}], \"._private\"), \"\")\n self.assertEqual(dictsort([{\"_private\": \"test\"}], \"_private\"), \"\")\n self.assertEqual(\n dictsort([{\"nested\": {\"_private\": \"test\"}}], \"nested._private\"), \"\"\n )", "def check_query(report):\n\n if string.find(report.db_query, \"count(\") != -1:\n\tif report.db_query[len(report.db_query) - 1] == \")\" or report.db_query[len(report.db_query) - 1] == \";\":\n\t db_query = report.db_query\n\telse:\n\t print \"error - unsupported query: report title: %s, id: %s\" % (report.title, report.id)\n\t return (False, '')\n elif string.find(report.db_query, \"group(\") != -1 or string.find(report.db_query, \"mapReduce(\") != -1:\n\tif report.db_query[len(report.db_query) - 1] == \")\":\n\t if string.find(report.db_query, \"forEach(printjson)\") == -1:\n\t\tdb_query = report.db_query+\".forEach(printjson)\"\n\t else:\n\t\tdb_query = report.db_query\n\telif report.db_query[len(report.db_query) - 1] == \";\":\n\t if string.find(report.db_query, \"forEach(printjson)\") == -1:\n\t\tdb_query = report.db_query[0:len(report.db_query) - 1]+\".forEach(printjson)\"\n\t else:\n\t\tdb_query = report.db_query\n\telse:\n\t print \"error - unsupported query: report title: %s, id: %s\" % (report.title, report.id)\n\t return (False, '')\n else:\n\tprint \"error - unsupported query: report title: %s, id: %s\" % (report.title, report.id)\n\treturn (False, '')\n\n return (True, db_query)", "def testSortNoDbAscending(self):\n self.request.GET['sort'] = \"custom\"\n self.datagrid.load_state()\n self.assertEqual(self.datagrid.sort_list, [\"custom\"])\n self.assertEqual(len(self.datagrid.rows), self.datagrid.paginate_by)\n self.assertEqual(self.datagrid.rows[0]['object'].name, \"Group 04\")\n self.assertEqual(self.datagrid.rows[1]['object'].name, \"Group 08\")\n self.assertEqual(self.datagrid.rows[2]['object'].name, \"Group 12\")\n\n # Exercise the code paths when rendering\n self.datagrid.render_listview()", "def search(self, query, k):\n docs={}\n for term in set(query.split(' ')):\n for article in self.tf_idf:\n if term in self.tf_idf[article]:\n if article in docs:\n docs[article]+=self.tf_idf[article][term]\n else:\n docs[article]=self.tf_idf[article][term]\n docs_sort=sorted(docs.items(), key=lambda p: (p[1],p[0]), reverse=True)\n 
docs_sort=[x for x in docs_sort if x[1] >= 0]\n if len(docs_sort)<k:\n print (docs)\n return docs\n else:\n print (docs_sort[:k])\n return docs_sort[:k]", "def test_no_value_for_search_query_returns_empty_queryset(self):\n article = ArticleFactory()\n article.publish()\n request = RequestFactory().get(\"\", {})\n response = Search.as_view()(request)\n self.assertIn(\"results\", response.context_data)\n results = response.context_data[\"results\"]\n self.assertEqual(len(results), 0)", "def test_search_user_fail_if_no_page_index(self) -> None:\n with local_app.test_client() as test:\n response = test.get('/api/search/v0/user', query_string=dict(query='test'))\n self.assertEqual(response.status_code, HTTPStatus.INTERNAL_SERVER_ERROR)", "def get_query(self, q, request):\r\n \r\n return self.model.objects.filter(nom__icontains=q).order_by('nom')[:50]" ]
[ "0.68004227", "0.668683", "0.649039", "0.6110041", "0.60085225", "0.5954969", "0.5795239", "0.5701212", "0.5672946", "0.5664839", "0.5664839", "0.56449044", "0.5619792", "0.5586709", "0.5570209", "0.55674064", "0.5558715", "0.55321485", "0.5520282", "0.55074567", "0.5495927", "0.53797334", "0.53739357", "0.5372519", "0.53645825", "0.53626543", "0.5360841", "0.53483963", "0.53442997", "0.5294291", "0.52657443", "0.5237829", "0.5236012", "0.5222183", "0.5219065", "0.52151716", "0.5212686", "0.52023005", "0.51822734", "0.51685226", "0.5137965", "0.5136125", "0.51292974", "0.5105255", "0.5104217", "0.51034164", "0.5096991", "0.50912094", "0.5088708", "0.5088134", "0.5082793", "0.50792754", "0.5077034", "0.5074088", "0.50740486", "0.506872", "0.50678396", "0.5063118", "0.5056802", "0.5055417", "0.50534403", "0.50509495", "0.50457335", "0.504437", "0.5030559", "0.5029873", "0.5026274", "0.5025875", "0.5018854", "0.50095284", "0.5002628", "0.49996257", "0.4995943", "0.49917918", "0.4986784", "0.49861306", "0.49836144", "0.4981952", "0.49793315", "0.49749497", "0.4964195", "0.49625954", "0.496104", "0.49603003", "0.49454698", "0.49451032", "0.4937333", "0.49253726", "0.49179244", "0.49173605", "0.49171263", "0.491664", "0.49104908", "0.49077395", "0.49033493", "0.49009076", "0.4894266", "0.48931387", "0.4881512", "0.48751462", "0.48742875" ]
0.0
-1
Should add an error if rationale is 'other' and rationale notes are empty
def test_clean(self): doc = Document.objects.create() form = DocumentMergeForm() form.cleaned_data = { "primary_document": doc.id, "rationale": "other", "rationale_notes": "", } form.clean() assert len(form.errors) == 1 # should not produce an error if rationale notes provided form = DocumentSearchForm() form.cleaned_data = { "primary_document": doc.id, "rationale": "other", "rationale_notes": "test", } form.clean() assert len(form.errors) == 0 # should not produce an error if rational is "duplicate" or "join" form = DocumentSearchForm() form.cleaned_data = { "primary_document": doc.id, "rationale": "duplicate", "rationale_notes": "", } form.clean() assert len(form.errors) == 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_contentious_prescription_no_rationale(self):\n url = reverse('admin:prescription_prescription_add')\n data = {\n 'name': 'Test',\n 'planned_season': 1,\n 'planned_year': 2013,\n 'region': 1,\n 'district': 1,\n 'location': 'Test location',\n 'perimeter': 20,\n 'area': 100,\n 'purposes': [1],\n 'remote_sensing_priority': 4,\n 'priority': 2,\n 'contentious': True,\n }\n response = self.client.post(url, data)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(Prescription.objects.count(), 0)\n form = response.context['adminform'].form\n self.assertEqual(form.errors, {\n 'contentious_rationale': ['A contentious burn requires a '\n 'contentious rationale.']\n })", "def test_empty_description(self):\n self.check_validation_error('description\\n string does not match regex \".+\"', name=\"Name\", description=\"\")", "def rationale(\n self, data: Dict, params: Optional[Dict] = None, headers: Optional[Dict] = None\n ) -> Dict:\n method = self._get_method(\"rationale\")\n\n return self.call_api_post(\n method=method, data=data, params=params, headers=headers\n )", "def test_missing_description(self):\n self.check_validation_error(\"description\\n field required\", name=\"Name\")", "def question_verification(title, description):\n if len(title) < 1:\n return 'You cannot post an empty title, Please add a title'\n if len(description) < 1:\n return 'You cannot post an empty description, Please add a description'\n if title.isdigit():\n return 'You cannot have a title with digits only, Please describe with some words'\n if description.isdigit():\n return 'You cannot have a description with digits only, Please describe with some words'", "def test_invalid_general_collateral_missing_description():\n collateral = copy.deepcopy(GENERAL_COLLATERAL)\n del collateral['description']\n\n is_valid, errors = validate(collateral, 'generalCollateral', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert not is_valid", "def testInvalidDescriptions(self):\n self.assertFalse(self.app._ignore_jobs(\"telecommuting is not an option\"))\n self.assertFalse(self.app._ignore_jobs(\"No telecommuting\"))\n self.assertFalse(self.app._ignore_jobs(\"No telecommute\"))\n self.assertFalse(self.app._ignore_jobs(\"TELECOMMUTE IS NOT AN OPTION\"))", "def reason(self, example):\n raise NotImplementedError()", "def test_invalid_general_collateral_description():\n collateral = copy.deepcopy(GENERAL_COLLATERAL)\n collateral['description'] = 'XX'\n\n is_valid, errors = validate(collateral, 'generalCollateral', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert not is_valid", "def test_notes_invalid_student(self):\n student_id = '1234567890'\n career_id = 34\n perdiod_id = 115\n result = self.ucuenca.notes(student_id, career_id, perdiod_id)\n self.assertFalse(result)", "def getReason():", "def no_underline_and_no_description(): # noqa: D416", "def test_query_no_def_invalid(self):\n with self.assertRaises(ValueError) as context:\n query_yes_no(question=\"Is anyone wiser than Socrates?\", default=\"xxx\")", "def if_not_matched(disease):\n\t\tprint(\"\")\n\t\tid_disease = disease\n\t\tdisease_details = get_details(id_disease)\n\t\ttreatments = get_treatments(id_disease)\n\t\tprint(\"\")\n\t\tprint(\"The most probable disease that you have is %s\\n\" %(id_disease))\n\t\tprint(\"A short description of the disease is given below :\\n\")\n\t\tprint(disease_details+\"\\n\")\n\t\tprint(\"The common medications and procedures suggested by other real 
doctors are: \\n\")\n\t\tprint(treatments+\"\\n\")", "def test_no_errors(self):\n test_error = \"\\r\\n--------------------------------------------------------------------\\r\\n\"\\\n \"Your code has been rated at 10.00/10 (previous run: 9.33/10, +0.67)\"\n\n self.assertEqual(\n format_errors(test_error),\n None\n )", "def test_sanity(self) -> None:\n if self.report.headlines:\n return\n\n if self.report.document.paragraphs:\n self.add_error(\n \"Rubrikerna i dokumentet är felformaterade eller saknas. \"\n \"Rubrikerna ska vara skrivna i versaler och ha samma \"\n \"typsnitt, stil och storlek som brödtexten. \"\n \"Rubriker avslutas med radbrytning.\"\n )\n\n if not self.report.document.paragraphs:\n self.add_error(\"Ditt dokument är antigen tomt eller i fel format.\")", "def test_missing_one_correction(self):\n errors = self.response.data[\"errors\"]\n errors[0][\"correction\"] = []\n self.response2 = self.client.post(\n reverse(\"correct\"), {\"id\": 1, \"errors\": errors}, format=\"json\"\n )\n\n self.assertEqual(\n self.response2.data,\n f\"You must insert a correction for the word {errors[0]['word']} or delete this error entry if you want to ignore it\",\n )", "def test_appraisal_note(self):\n transfer = random.choice(\n Transfer.objects.filter(process_status=Transfer.VALIDATED))\n note_text = helpers.random_string(30)\n for req_type in [\"submit\", \"edit\", \"delete\"]:\n self.assert_status_code(\n \"get\", reverse(\"appraise:list\"), 200,\n data={\n \"req_form\": \"appraise\",\n \"req_type\": req_type,\n \"upload_id\": transfer.pk,\n \"appraisal_note\": note_text,\n }, ajax=True)\n updated = Transfer.objects.get(pk=transfer.pk)\n self.assertEqual(updated.appraisal_note, None if req_type == \"delete\" else note_text)", "def test_explained_text(self):\n result = self._do_output(o.ExplainedTextOutput(o.Color.Never), self._demo_msgs)\n self.assertEqual(result,\n \"mock: mock.cmake(1): error: short text\\n\"\n \" * long text\\n\"\n \" * You can ignore this problem with --ignore mock_msg\\n\"\n \"mock: mock.cmake(2): warning: short text\\n\"\n \"mock: mock.cmake(3): notice: short text\\n\"\n \"mock: error: short text\\n\"\n \"mock: mock.cmake: error: short text\\n\"\n )", "def test_check_explainer_1(self):\n explainer = None\n assert check_explainer(explainer) == None", "def testValidDescriptions(self):\n self.assertTrue(self.app._ignore_jobs(\"\"))\n self.assertTrue(self.app._ignore_jobs(\"This is valid\"))\n self.assertTrue(self.app._ignore_jobs(\"you can telecommute\"))", "def validate_notes_input(notes):\n if len(notes) == 0:\n notes = 'None'\n clear()\n return notes", "def test_do_not_need_alternate(self):\n os.unlink(os.path.join(self.reports_dir,\n 'TEST-testutil.manual_test.LintTest-fail.xml'\n ))\n os.unlink(os.path.join(self.reports_dir,\n 'TEST-testutil.manual_test.LintTest-success.xml'\n ))\n actual = self._analyze_make_output()\n self.assertEqual(1, actual)\n self.assertIn('E999 lint error from txt-file.', self.errors[0])", "def test_get_opposite_meaning_add(self):\n\t\tobj_ut = sentiment.get_opposite_meaning(\n\t\t\t\"good\")\n\t\tself.assertEqual(obj_ut, \n\t\t\t\"(not|dont|cant|wont|couldnt|shouldnt|never) (\\w+ ){0,2} ?good\")", "def get_note_message(note):\n assert note <= 10, \"Note is %.2f. Either you cheated, or pylint's \\\nbroken!\" % note\n if note < 0:\n msg = 'You have to do something quick !'\n elif note < 1:\n msg = 'Hey! This is really dreadful. Or maybe pylint is buggy?'\n elif note < 2:\n msg = \"Come on! 
You can't be proud of this code\"\n elif note < 3:\n msg = 'Hum... Needs work.'\n elif note < 4:\n msg = 'Wouldn\\'t you be a bit lazy?'\n elif note < 5:\n msg = 'A little more work would make it acceptable.'\n elif note < 6:\n msg = 'Just the bare minimum. Give it a bit more polish. '\n elif note < 7:\n msg = 'This is okay-ish, but I\\'m sure you can do better.'\n elif note < 8:\n msg = 'If you commit now, people should not be making nasty \\\ncomments about you on c.l.py'\n elif note < 9:\n msg = 'That\\'s pretty good. Good work mate.'\n elif note < 10:\n msg = 'So close to being perfect...'\n else:\n msg = 'Wow ! Now this deserves our uttermost respect.\\nPlease send \\\nyour code to python-projects@logilab.org'\n return msg", "def _on_op_remark(self, msg):\r\n\r\n if \"success\" in msg and not msg[\"success\"]:\r\n if msg[\"message\"] == \"Invalid call\":\r\n self._on_invalid_call(msg)\r\n elif msg[\"message\"] == \"Order not found\":\r\n self._on_order_not_found(msg)\r\n elif msg[\"message\"] == \"Order amount is too low\":\r\n self._on_order_amount_too_low(msg)\r\n elif \"Too many orders placed\" in msg[\"message\"]:\r\n self._on_too_many_orders(msg)\r\n else:\r\n # we should log this, helps with debugging\r\n self.debug(msg)", "def missed_needed_docstring(self):\n self.needed += 1\n self.missing += 1", "def lf_impression_section_negative(report):\n impression_words = ['impression','interpretation','comments']\n impression = get_section_with_name(impression_words, report)\n reg_normal = ['no epileptiform', 'absence of epileptiform', 'not epileptiform', \n 'normal EEG', 'normal aEEG','benign','non-specific','nonepileptic','idiopathic',\n 'no seizures','EEG is normal','normal study']\n if any([re.search(reg, impression, re.IGNORECASE) for reg in reg_normal] ):\n return OTHERS_VAL\n #return NA_VAL\n else:\n return ABSTAIN_VAL", "def should_add_pr_comment(self):\n pass", "async def note(self, ctx):\n note_embed = discord.Embed(color=discord.Color.blurple())\n note_embed.add_field(name=\"__**Please Note**__\", value=RULES_NOTE)\n await ctx.send(embed=note_embed)", "def allowedSpecific(self, differences, msg=None):\n return allowed_specific(differences, msg)", "def remediation_description(self) -> Optional[str]:\n return pulumi.get(self, \"remediation_description\")", "def test_create_negative_feedback_removal(self):\n pass", "def _get_title_and_explanation(self):\n title = \"\"\n more_lines = []\n if self.__doc__:\n # Find the first non-empty line in the docstring. 
If there is\n for line in self.__doc__.split(\"\\n\")[:-1]: # strip off last line, always blank\n line = line.strip()\n if line:\n if not title:\n # We don't have the title set, yet, so we know this is the first line.\n if line.endswith(\".\"):\n # Don't want a period at the end of a title to make it look\n # better.\n title = line[:-1]\n else:\n title = line\n continue\n if not line and not more_lines:\n # We don't need empty lines at the start of the explanation\n continue\n # Add up the lines of the explanation text\n if line.startswith(\"*\"):\n line = f\"&nbsp; &nbsp; {line}\"\n\n more_lines.append(line or \"<br>&nbsp;<br>\") # Empty lines become line break\n return ((title or \"A resource\"), \" \".join(more_lines))", "def test_unwanted_words(self) -> None:\n pad_open: bool = False\n for word in self.report.get_words():\n if word.text in self.rules.citation_delimiters:\n pad_open = not pad_open\n continue\n if pad_open:\n continue\n for u_word in self.rules.unwanted_words:\n if word.text == u_word[\"word\"]:\n self.add_error(\n f\"Ordet {word.text} är inte tillåtet, \"\n f\"använd {u_word['alternative']} istället.\",\n word=word,\n )\n break", "def set_invalid_notes(self, error):\n self._invalid_notes = error", "def remediation_description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"remediation_description\")", "def remediation_description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"remediation_description\")", "def test_prefer_failed_alternate(self):\n os.unlink(os.path.join(self.reports_dir,\n 'TEST-testutil.manual_test.LintTest-success.xml'\n ))\n actual = self._analyze_make_output()\n self.assertEqual(1, actual)\n self.assertIn('E302 expected 2 blank lines', self.errors[0])", "def test_manual_entry_error(self):\r\n self._login_admin()\r\n # no url entered\r\n res = self.app.post(\r\n '/admin/new_error',\r\n params={\r\n 'url': '',\r\n 'description': '',\r\n 'extended': '',\r\n 'tags': ''\r\n })\r\n self.assertIn('not valid', res.body)", "def negation_check(self,sentence):", "def negations(self) -> str:", "def invalid_notes(self):\n return self._invalid_notes", "def PleaseCorrect(self, mr, **echo_data):\n mr.PrepareForReentry(echo_data)\n self.get()", "def test_mult_specifiers_missing(self):\n template = '{0} too few {1}'\n value_count = 3\n msg = ('The formatter contains too few \"{}\" '\n 'specifiers for the number of source fields.')\n with six.assertRaisesRegex(self, ValidationError, msg):\n validate_str_substitution(template, value_count)", "def whyNotLegal(self):\r\n return self._getLegalityStatus()[1]", "def reason_to_reject(self):\n # Has used search enough\n if self.num_searches < constants.MIN_NUM_SEARCH_ONBOARDING:\n return f'did not use search enough (number of use {self.num_searches}).'\n\n # Has selected enough sentenes\n num_selections = 0\n for msg in self.messages:\n task_data = msg.get('task_data')\n if not (task_data and isinstance(task_data, dict)):\n continue\n sel_options = task_data.get('selected_text_candidates')\n if not sel_options or len(sel_options) == 1: # No choices\n continue\n if not sel_options[0][0]:\n # sel_options[0][0] is \"Did no use ...\" option\n num_selections += 1\n\n if num_selections < constants.MIN_NUM_SELECTED_SENTENCES_ONBOARDING:\n return (\n 'did not use or select search results enough times '\n f'(number of times used: {num_selections})'\n )\n return super().reason_to_reject()", "def diagnostic_self_test_fail(reason='no errors found', additional_text='no errors found', 
severity_level='error'):\n\n SysTools.notify.error(RPD_EVENT_CONNECTIVITY_DIAGNOSTIC_SELF_TEST_FAIL[0],\n reason, additional_text, severity_level)", "def test_no_reason(self):\n form = self._get_form(data={'reason': ''})\n self.assertFalse(form.is_valid())\n self.assertTrue('reason' in form.errors)", "def valid_alternative_image_text(arch, **kwargs):\n if arch.xpath('//img[not(@alt or @t-att-alt or @t-attf-alt)]'):\n return \"Warning\"\n return True", "def test_other_no_lastres(bot, bot_arg, update):\n other(bot_arg, update)\n\n expect = \"You haven't searched for anything yet\"\n assert bot_arg.msg_log[0] == expect", "def _validate_analysis_description(\n analysis_description,feature_types,sample_column='sample_igf_id',\n feature_column='feature_type',reference_column='reference'):\n try:\n messages = list()\n analysis_list = list()\n sample_id_list = list()\n if not isinstance(analysis_description,list):\n raise ValueError(\n 'Expecting a list of analysis_description, got {0}'.\\\n format(type(analysis_description)))\n if not isinstance(feature_types,list):\n raise ValueError(\n 'Expecting a list for feature_types, got {0}'.\\\n format(type(feature_types)))\n df = pd.DataFrame(analysis_description)\n for c in (sample_column,feature_column):\n if c not in df.columns:\n messages.\\\n append('missing {0} in analysis_data'.format(c))\n if len(messages) > 0:\n raise KeyError('Missing key column: {0}'.format(messages))\n analysis_list = \\\n list(\n df[feature_column].\\\n dropna().\\\n drop_duplicates().\\\n values)\n analysis_list = \\\n set(\n [f.replace(' ','_').lower()\n for f in analysis_list])\n analysis_list = list(analysis_list)\n sample_id_list = \\\n list(\n df[sample_column].\\\n dropna().\\\n drop_duplicates().\\\n values)\n for f,f_data in df.groupby(feature_column):\n f = f.replace(' ','_').lower()\n f_samples = list(f_data[sample_column].values)\n if f not in feature_types:\n messages.\\\n append('feature_type {0} is not defined: {1}'.\\\n format(f,f_samples))\n if len(f_samples) > 1:\n messages.\\\n append('feature {0} has {1} samples: {2}'.\\\n format(f,len(f_samples),','.join(f_samples)))\n if reference_column in df.columns:\n ref_msg = \\\n ['reference {0} does not exists'.format(r)\n for r in list(df['reference'].dropna().values)\n if not os.path.exists(r)]\n if len(ref_msg) > 0:\n messages.\\\n extend(ref_msg)\n return sample_id_list, analysis_list, messages\n except Exception as e:\n raise ValueError(e)", "def test_descriptions_render_correctly(self):\n # help text in fields\n self.assertContains(\n self.response, \"<td>first name - The person's first name</td>\"\n )\n self.assertContains(\n self.response, \"<td>last name - The person's last name</td>\"\n )\n\n # method docstrings\n self.assertContains(self.response, \"<p>Get the full name of the person</p>\")\n\n link = '<a class=\"reference external\" href=\"/admindocs/models/%s/\">%s</a>'\n markup = \"<p>the related %s object</p>\"\n company_markup = markup % (link % (\"admin_docs.company\", \"admin_docs.Company\"))\n\n # foreign keys\n self.assertContains(self.response, company_markup)\n\n # foreign keys with help text\n self.assertContains(self.response, \"%s\\n - place of work\" % company_markup)\n\n # many to many fields\n self.assertContains(\n self.response,\n \"number of related %s objects\"\n % (link % (\"admin_docs.group\", \"admin_docs.Group\")),\n )\n self.assertContains(\n self.response,\n \"all related %s objects\"\n % (link % (\"admin_docs.group\", \"admin_docs.Group\")),\n )\n\n # \"raw\" and 
\"include\" directives are disabled\n self.assertContains(\n self.response,\n \"<p>&quot;raw&quot; directive disabled.</p>\",\n )\n self.assertContains(\n self.response, \".. raw:: html\\n :file: admin_docs/evilfile.txt\"\n )\n self.assertContains(\n self.response,\n \"<p>&quot;include&quot; directive disabled.</p>\",\n )\n self.assertContains(self.response, \".. include:: admin_docs/evilfile.txt\")\n out = self.docutils_stderr.getvalue()\n self.assertIn('\"raw\" directive disabled', out)\n self.assertIn('\"include\" directive disabled', out)", "def test_get_opposite_meaning_subtract(self):\n\t\tobj_ut = sentiment.get_opposite_meaning(\n\t\t\t\"(not|dont|cant|wont|couldnt|shouldnt|never) (\\w+ ){0,2} ?good\")\n\t\tself.assertEqual(obj_ut, \"good\")", "def get_intro_message() -> str:\n return \"\"\"You are about to begin a new record.\nType the text sample you want to record.\nThis first sample MUST be typed by the real user (no impostor data).\"\"\"", "def make_extra_questions_txt(self):\n raise NotImplementedError", "def validateDescription(description):\n \n if not(description) or len(description.split()) < 5:\n return \"You must supply a description of at least 5 words.\"", "def test_other_no_sources(monkeypatch, bot, bot_arg, update):\n monkeypatch.setattr(fake_log, 'source', lyricfetch.sources[-1])\n bot.log_result('chat_id', fake_log)\n\n other(bot_arg, update)\n assert 'No other sources' in bot_arg.msg_log[0]", "def test_unknown_text(self):\n metric1 = dict(\n type=\"metric_type\",\n name=\"Metric\",\n unit=\"units\",\n scale=\"count\",\n recent_measurements=[\n dict(count=dict(value=0, status=\"near_target_met\")),\n dict(count=dict(value=None, status=\"unknown\")),\n ],\n )\n metric_notification_data1 = MetricNotificationData(metric1, self.data_model, \"status_changed\")\n notification = Notification(self.report, [metric_notification_data1], \"destination_uuid\", {})\n text = build_notification_text(notification)\n self.assertEqual(\n \"[Report 1](https://report1) has 1 metric that is notable:\\n\\n\"\n \"* Metric status is white (unknown), was yellow (near target met). Value is ? 
units, was 0 units.\\n\",\n text,\n )", "async def _add_note(\n self,\n ctx: Context,\n note: Union[discord.Message, str],\n *,\n reason: str = None\n ):\n\n if isinstance(note, discord.Message):\n content = note.clean_content\n author = str(note.author)\n channel = note.channel.mention\n jump_url = note.jump_url\n else:\n content = note\n author = None\n channel = None\n jump_url = None\n\n async with self.config.member(ctx.author).notes() as notes:\n notes.append({\n \"note\": content,\n \"reason\": reason or \"No reason\",\n \"author\": author,\n \"channel\": channel,\n \"jump_url\": jump_url\n })\n\n await ctx.message.add_reaction(CHECK_MARK)", "def checkEntryPopulated(self, entry, errorTitle='Field Not Populated', errorMessage='Please populate field.', errorDescription=None):\n\n # if user does not provide an error description generate one automatically\n if not errorDescription:\n errorDescription = 'relevant entry name: ' + str(entry.objectName())\n\n text = str(entry.text())\n\n if text == '':\n raise ValueError(errorTitle, errorMessage, errorDescription)\n\n return text", "def test_calling_ruler_without_overwrite_will_keep_exisiting_ents(\n ruler: SpaczzRuler, doc: Doc\n) -> None:\n doc.ents += (\n Span(doc, 2, 4, label=\"WRONG\"),\n Span(doc, 15, 16, label=\"WRONG\"),\n )\n doc = ruler(doc)\n assert len([ent.label_ for ent in doc.ents if ent.label_ == \"WRONG\"]) == 2", "def test_explainer_class_errors(self):\n feature_importance = 'Feature importance not implemented.'\n model_explanation = 'Model explanation (global) not implemented.'\n instance_explanation = ('Data point explanation (local) not '\n 'implemented.')\n\n with pytest.raises(NotImplementedError) as exinf:\n self.explainer.feature_importance()\n assert str(exinf.value) == feature_importance\n with pytest.raises(NotImplementedError) as exinf:\n self.explainer.explain_model()\n assert str(exinf.value) == model_explanation\n with pytest.raises(NotImplementedError) as exinf:\n self.explainer.explain_instance()\n assert str(exinf.value) == instance_explanation", "def test_text_default(self):\n r = Review()\n self.assertEqual(\"\", r.text)", "def test_unsuccessful_rating_of_own_article(self):\n response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': None},\n format=\"json\",\n **self.author_headers)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertIn(self.rate_own_article_error_message,\n response.data['message'])", "def log_error(title, message):\n if title == \"Redundant\":\n print(f\"[{title}]: Refactoring is not necessary\")\n else:\n print(f\"[{title}]: Refactoring is not allowed\")\n print(f\"{message}\")", "def testNoneWithAnother(self):\n exp_str = 'none: Cannot combine with other --timing_measurements options'\n exp_regex = r'^%s$' % re.escape(exp_str)\n with self.assertRaisesRegexp(flags.ValidationError, exp_regex):\n timing_util.ValidateMeasurementsFlag(['none', 'runtimes'])", "def find_mistake(self):\n if self.solve_stats[-1][\"comment\"] == \"\":\n for stat in reversed(self.solve_stats):\n if stat[\"comment\"] != \"\":\n self.solve_stats[stat[\"count\"]][\"comment\"] += \"mistake from here\"\n break", "def test_doc1(self):\n assert models.review.__doc__ is not None", "def test_session10_readme_proper_description():\n READMELOOKSGOOD = True\n f = open(\"README.md\", \"r\", encoding=\"utf-8\")\n content = f.read()\n f.close()\n for c in README_CONTENT_CHECK_FOR:\n if c not in content:\n print(c)\n READMELOOKSGOOD = False\n pass\n assert 
READMELOOKSGOOD == True, \"You have not described all the functions/class well in your README.md file\"", "def error_general(user: discord.User) -> str:\n return (\n f\"Well, nuts. I had some trouble processing your request, {user.mention}.\"\n f\" I'll DM you the details.\"\n )", "def test_liking_non_existent_comment(self):\n self.non_existing(self.like_url(3))", "def make_optional_silence_txt(self):\n raise NotImplementedError", "def initialize_error_summary() -> str:\n error_summary = '\\nSummary of <span class=\"tex-fatal\">Critical Errors:</span>\\n\\n<ul>\\n'\n return error_summary", "def remove_info(text, journal_id, label, doc_type='inkomst'):\r\n sections = text.split('NEWPAR')\r\n cleaned_text = ''\r\n diagnose_detected = False\r\n for section in sections:\r\n if section:\r\n section_header =list(filter(None, section.split(' ')))[0]\r\n #print(section_header)\r\n if 'diagnose' in section_header.lower() or 'DIAGNOSE' in section or 'Diagnose :' in section or 'Problemstilling :' in section:\r\n diagnose_detected = True\r\n else:\r\n cleaned_text += section + ' '\r\n if not diagnose_detected :\r\n print('No DIAGNOSE in: ', journal_id)\r\n return cleaned_text", "def test_not_the_owner(self):\n self.client2 = Client()\n errors = self.response.data[\"errors\"]\n errors[0][\"correction\"] = \"שתיתי\"\n self.response2 = self.client2.post(\n reverse(\"correct\"), {\"id\": 1, \"errors\": errors}, format=\"json\"\n )\n self.assertEqual(\n self.response2.data, \"You have no permission to correct this Deck\"\n )", "def clean_description(self):\n description = self.cleaned_data['description']\n if not re.match(r'[\\w{4}\\s*]+', description) or len(description) < 10:\n v_err('no_desc')\n return description", "def clean_comment(self):\n comment = self.cleaned_data[\"comment\"]\n if (not getattr(settings, 'COMMENTS_ALLOW_PROFANITIES', False) and\n getattr(settings, 'PROFANITIES_LIST', False)):\n bad_words = [w for w in settings.PROFANITIES_LIST if w in comment.lower()]\n if bad_words:\n raise forms.ValidationError(ngettext(\n \"Watch your mouth! The word %s is not allowed here.\",\n \"Watch your mouth! 
The words %s are not allowed here.\",\n len(bad_words)) % get_text_list(\n ['\"%s%s%s\"' % (i[0], '-' * (len(i) - 2), i[-1])\n for i in bad_words], gettext('and')))\n return comment", "def _reason_to_disqualify(self, agent: Agent):\n # Disconncet or timeout\n mephisto_agent = agent.mephisto_agent\n if mephisto_agent.get_status() in (\n AgentState.STATUS_EXPIRED,\n AgentState.STATUS_TIMEOUT,\n ):\n return 'agent was disconnected.'\n\n # Wizard not using search enough\n if agent.agent_id == 'Wizard' and (\n (self.num_search_queries < self.search_warning_threshold)\n or (self.num_times_search_resutls_selected < self.select_warning_threshold)\n ):\n return (\n 'blocked for not enough search activity '\n f'({self.num_search_queries} searches; '\n f'{self.num_times_search_resutls_selected} selected sentecnes).'\n )\n\n acceptability_checker_results = self.acceptability_checker.check_messages(\n agent.agent_id,\n self.selected_persona,\n messages=self.messages,\n is_worker_0=False,\n violation_types=constants.ACCEPTABILITY_VIOLATIONS,\n )\n if acceptability_checker_results:\n return f'ParlAI acceptability checker found violations: \"{acceptability_checker_results}\"'", "def test_spelling_mistake_for_technical_terms_not_in_context(self, style):\n with ExpectedException(LinterFailure):\n content = \"{s}{e}\\n\\\"\\\"\\\"technicalterm\\\"\\\"\\\"\"\n self._spellcheck_lint(content, style)", "def _check_required(self):\n if self.data['history_file'] is None:\n return\n required = self.data.get('required_changelog_text')\n if not required:\n return\n if isinstance(required, six.string_types):\n required = [required]\n history_last_release = self.data['history_last_release']\n for text in required:\n if text in history_last_release:\n # Found it, all is fine.\n return\n pretty_required = '\"{}\"'.format('\", \"'.join(required))\n if not utils.ask(\n \"WARNING: Changelog should contain at least one of \"\n \"these required strings: {}. Are you sure you \"\n \"want to release?\".format(pretty_required),\n default=False):\n sys.exit(1)", "def test_forbidden_words(self) -> None:\n pad_open: bool = False\n words: List[Word] = self.report.get_words()\n forbidden_words: List[Word] = []\n last_error: bool = False\n\n for word in words:\n if word.text in self.rules.citation_delimiters:\n pad_open = not pad_open\n continue\n if pad_open:\n continue\n if (word.text in self.rules.forbidden_words) or any(\n [b in self.rules.forbidden_words for b in word.baseform]\n ):\n forbidden_words.append(word)\n last_error = True\n continue\n if last_error:\n last_error = False\n combo = \" \".join([w.text for w in forbidden_words])\n start, _ = self.report.get_word_postion(forbidden_words[0])\n _, end = self.report.get_word_postion(forbidden_words[-1])\n self.add_error(\n f\"Ordet {combo} får endast förekomma i citat.\", position=(start,end)\n )", "def test_readme_proper_description():\n READMELOOKSGOOD = True\n f = open(\"README.md\", \"r\", encoding=\"utf-8\")\n content = f.read()\n f.close()\n for c in README_CONTENT_CHECK_FOR:\n if c not in content:\n print(c)\n READMELOOKSGOOD = False\n break\n\n assert READMELOOKSGOOD is True, \"You have not described all the functions/class well in your README.md file\"", "def test_final_strongly_recommended(self):\n\n # move raw to X amd: i.e. 
there is no final\n self.validator.adata.X = self.validator.adata.raw.X\n del self.validator.adata.raw\n self.validator.adata.uns[\"X_normalization\"] = \"none\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.warnings,\n [\n \"WARNING: Only raw data was found, i.e. there is no 'raw.X' and 'uns['X_normalization']' is 'none'. \"\n \"It is STRONGLY RECOMMENDED that 'final' (normalized) data is provided.\"\n ],\n )", "def test_explain_non_existent_code(self):\n command_line = [\"pool\", \"explain\", \"bogus\"]\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)", "def test_with_preceding_text(self):\n\n expected = r'''\n <p>foo\n <strong>foo</strong></p>\n <details class=\"note\">\n <summary>Details</summary>\n </details>\n '''\n\n self.check_markdown(\n R'''\n foo\n **foo**\n ??? note \"Details\"\n ''',\n expected,\n True\n )", "def test_correction_dont_exist(self):\n errors = self.response.data[\"errors\"]\n errors[0][\"correction\"] = \"שתיתי\"\n self.response2 = self.client.post(\n reverse(\"correct\"), {\"id\": 3, \"errors\": errors}, format=\"json\"\n )\n self.assertEqual(\n self.response2.data,\n \"The Deck with 3 doesn't exist, we only keep decks for 1 hour\",\n )", "def test_spelling(self) -> None:\n misstakes: Dict[Word, List[str]] = self.report.spellcheck(\n self.rules.spelling_skip_wordclasses\n )\n for word, corrections in misstakes.items():\n if word.text.lower() in self.rules.forbidden_words:\n continue\n if word.text.lower() in [\n ab[\"word\"] for ab in self.rules.police_abbreviations\n ]:\n continue\n error_text: str = f\"Ordet {word.text} är felstavat.\"\n if corrections:\n error_text += \" Rättningsförslag: \" + \", \".join(corrections) + \".\"\n self.add_error(error_text, word=word)", "def test_related_add_same_language(app, testdata):\n doc1 = Document.get_record_by_pid(testdata[\"documents\"][0][\"document_pid\"])\n doc2 = Document.get_record_by_pid(testdata[\"documents\"][1][\"document_pid\"])\n\n doc1.related.add_language(doc2)\n with pytest.raises(RelatedRecordError):\n doc1.related.add_language(doc2)", "def default(self, answer):\n return u'Incorrect answer'", "def clean(self):\n if not self.link and not self.text:\n raise ValidationError('Must include a link AND/OR a text value')", "def ugettext(self, text):\r\n if text == \"There was a problem with the staff answer to this problem.\":\r\n text = \"TRANSLATED!\"\r\n return text", "def error_false(self):\n self.errors = self.errors[0:-1]\n if not self.errors:\n self.update_info()", "def test_other(self):\n self.assertRaises(ValueError, isStringTrue, \"dog\")\n return", "def test_return_goal_weight_text_if_incorrect_data(self):\n # create user\n user_created = self.create_user_questionnaire_in_progress()\n\n # data\n data_dict = {\"height\": \"1,60\", \"actual_weight\": \"80\",\n \"cruising_weight\": \"50\", \"weight_goal\": \"90\"}\n\n # call method\n context = self.new_controller.return_goal_weight_text_save_weight(data_dict,\n user_created.id)\n\n dict_questions = {\"height\": \"Quelle taille fais-tu ? 
(au format x,xx)\",\n \"actual_weight\": \"Quel est ton poids actuel ?\",\n \"cruising_weight\": \"Quel est ton poids de croisière \"\n \"(poids le plus longtemps \"\n \"maintenu sans effort) ?\",\n \"weight_goal\": \"Quel est ton poids d'objectif ?\"}\n\n self.assertEqual(len(context), 3)\n self.assertEqual(context[\"dict_questions\"], dict_questions)\n self.assertTrue(context[\"goal_weight_text\"], \"Nous allons maintenant définir ton objectif.\")\n self.assertTrue(context[\"error_message\"], \"Ton objectif doit être inférieur \"\n \"à ton poids actuel.\")", "def test_null_field(self):\r\n problem = self.get_item_from_modulestore(self.problem_usage_key, True)\r\n self.assertIsNotNone(problem.markdown)\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'nullout': ['markdown']}\r\n )\r\n problem = self.get_item_from_modulestore(self.problem_usage_key, True)\r\n self.assertIsNone(problem.markdown)", "def test_make_compatible_taxa_summaries_incompatible(self):\r\n self.assertRaises(ValueError, _make_compatible_taxa_summaries,\r\n self.taxa_summary3, self.taxa_summary4)\r\n self.assertRaises(ValueError, _make_compatible_taxa_summaries,\r\n self.taxa_summary1, self.taxa_summary2)", "def label_rule_for_others(text: str, label_type: str) -> str:\n match = re.search(LABEL_SPECIFICATION[f\"RE_{label_type.upper()}\"], text)\n if match:\n return match.group(\"label\").strip()\n return \"\"", "def test_doc2(self):\n assert Review.__doc__ is not None", "def test_required_fields_title(self):\n\n del self.validator.adata.uns[\"title\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors, [\"ERROR: 'title' in 'uns' is not present.\"]\n )" ]
[ "0.6536794", "0.6193883", "0.59964615", "0.59149736", "0.5794971", "0.57020473", "0.568294", "0.5651748", "0.56196064", "0.55880433", "0.556072", "0.55405277", "0.5500709", "0.54977834", "0.5488182", "0.5431535", "0.5406004", "0.5385384", "0.5364079", "0.53436726", "0.53365856", "0.53310394", "0.5310975", "0.53080356", "0.53049743", "0.53034663", "0.529307", "0.5272167", "0.52096075", "0.52003884", "0.5195704", "0.51951116", "0.51859367", "0.518166", "0.5177244", "0.5164928", "0.5160837", "0.5160837", "0.51500314", "0.5145212", "0.5114677", "0.5108634", "0.5101004", "0.50907326", "0.50881594", "0.5086672", "0.50855875", "0.5084082", "0.507437", "0.50730217", "0.50714797", "0.5071449", "0.5062327", "0.50551033", "0.5054966", "0.5051138", "0.50421876", "0.50417364", "0.5038345", "0.5037221", "0.5030685", "0.5019474", "0.5014456", "0.5014197", "0.49998948", "0.4992352", "0.49904373", "0.49870738", "0.49838132", "0.498318", "0.4982648", "0.49825203", "0.497608", "0.49736977", "0.49717325", "0.49655846", "0.49637303", "0.49586424", "0.4955657", "0.49556077", "0.49510345", "0.49500442", "0.49497244", "0.4949366", "0.49476057", "0.4946822", "0.49467865", "0.49351987", "0.4931941", "0.49319312", "0.49301833", "0.4925764", "0.49255526", "0.49220502", "0.49124807", "0.49120888", "0.49069986", "0.4906686", "0.49052915", "0.49015832" ]
0.5011413
64
Can this crawler process this URL?
def offer(self, url):
    parts = urlparse(url)
    return bool(self.KT_RE.match(parts.netloc))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def can_handle(self, url):\n return self.url_re.match(url)", "def match_url(self, url):\n pass", "def can_fetch(self, useragent, url):\n target_url = url\n if self.root_path:\n target_url = re.sub(self.root_path, \"\", target_url)\n return super(Robot, self).can_fetch(useragent, target_url)", "def can_fetch(self, agent, url):\n if not url.startswith(self.base_url):\n url = urljoin(self.base_url, url)\n if self.robot_parser:\n return self.robot_parser.can_fetch(agent, url)\n return True", "def canDo_url(self, url):\n hostname = urlparse.urlsplit(url)[1]\n for hostEnd in self.highwireHosts:\n if hostname.endswith(hostEnd):\n logging.log(5, 'url hostname %s ends with %s -> highwire' % (hostname, hostEnd))\n return True\n\n if hostname in self.hostCache:\n ipAddr = self.hostCache[hostname]\n else:\n logging.debug('Looking up IP for %s' % hostname)\n try:\n ipAddr = socket.gethostbyname(hostname)\n self.hostCache[hostname] = ipAddr\n except socket.gaierror:\n raise pubGetError('Illegal hostname %s in link' % hostname, 'invalidHostname', hostname)\n\n ipParts = ipAddr.split('.')\n ipParts = [ int(x) for x in ipParts ]\n result = ipParts[0] == 171 and ipParts[1] in range(64, 68)\n if result == True:\n logging.log(5, 'hostname %s is highwire host' % hostname)\n return result", "def validate_url(self):\n pass", "def valid_url(self):\r\n if self.resolver:\r\n return True\r\n return False", "def _match(cls, url, **kwargs):\n return url.scheme.startswith('http')", "def canDo_url(self, artMeta):\n return False", "def crawl(self, url):\n return None", "def verify(self):\n if self.geturl():\n return True\n return False", "def isValidURL(self, url):\n if \"imdb.com\" in url:\n return True\n else:\n return False", "def checkForURL(self, data):\n \n moduleCoordinator.ModuleCoordinator().addEvent(moduleCoordinator.URL_EVENT, data, self.hash)", "def canHandleUrl(cls, url):\n return url.startswith(\"https://cc0textures.com/view.php?tex=\")", "def url_is_good(url):\n return website_re.match(url)\n # possible validation of reachability of website\n # http_response = requests.get(url)\n # return http_response < 400:", "def check_url_invalidity(self) -> bool:\n validate = URLValidator()\n try:\n validate(self.args.url)\n return False\n except ValidationError:\n return True", "def target_url(self, url):\n url_parse = urlparse.urlparse(url)\n patten = re.compile(self.url_patten)\n if patten.match(url_parse.path):\n return True\n else:\n return False", "def _urlcheck(self):\n if (self['.managerhost'] and self['.settingurl'] and self['.guid']):\n return True\n else:\n return False", "def _is_request_to_token_url(self, request):\n if not self.token_url:\n return False\n\n if self.token_url == request.path:\n return True\n\n request.match(self.token_url)\n\n if request.matchdict:\n return True\n\n return False", "def download_allowed(self, url, scheme, netloc):\n robot = urllib.robotparser.RobotFileParser('%s://%s/%s' % (scheme, netloc, config.ROBOTS))\n try:\n robot.read()\n except ValueError:\n raise urllib.error.URLError('<urlopen error no protocol given>')\n\n return robot.can_fetch(config.USER_AGENT, url)", "def _is_valid(self, url: ParseResult):\n\n if (\n re.match('(.*).' 
+ self.netloc, url.netloc) is None or\n re.match('(.*)\\+[0-9]*$', url.path) is not None or\n re.match('(.*)javascript:(.*)', url.path) is not None\n ):\n return False\n\n return True", "def check_url(url=None, parse_url=None):\n return False", "def __isUrl(self, url):\n if type(url)==str:\n return url.startswith('http://') or url.startswith('https://')\n return False", "def is_link(self, url):\n return not self.is_page(url)", "def check_if_can_fetch(self, url, useragent=\"*\"):\n logger.debug(\"Checking if can fetch %s\" % url)\n return self.rp.can_fetch(useragent=useragent, url=url)", "def endpoint_checker(url):\r\n if \"/arcgis/rest/services/\" and \"http\" in url:\r\n return True\r\n return False", "def valid(url):\n return 0 < len(urlparse(url)[1])", "def checkVisitedUrl(self, url):\n self.lock.acquire()\n r = url in self.__visitedUrls\n self.lock.release()\n return r", "def is_senior_allow(self, url):\n for src_pat, dir_pat in self.senior_allow_res.iteritems():\n if src_pat.match(self._task_url) and dir_pat.match(url):\n return True\n return False", "def clean_urls(self) -> bool:\n return pulumi.get(self, \"clean_urls\")", "def is_url_valid(self, url: str) -> bool:\n if self.exclude and re.search(self.exclude, url):\n return False\n\n parts = urllib.parse.urlparse(url)\n\n if parts.scheme not in ('http', 'https'):\n LOGGER.debug(f'skipping non-http scheme in found at {url}')\n return False\n\n host, _ = urllib.parse.splitport(parts.netloc) # type: ignore\n\n if not self.host_okay(host):\n LOGGER.debug(f'skipping non-root host found at {url}')\n return False\n\n return True", "def url_exists(url):\n # Check for URLs we can't validate\n if url.startswith(\"https://kiwiirc.com\"):\n return True\n if url.startswith(\"https://www.projectcalico.org\"):\n return True\n\n try:\n urllib2.urlopen(url)\n return True\n except urllib2.HTTPError, e:\n print_bullet(\"Hit error reading %s: %s\" % (url, e))\n return False\n except urllib2.URLError, e:\n print_bullet(\"Hit error reading %s: %s\" % (url, e))\n return False", "def _url_exists(self, url):\n return url_exists(url)", "def process_request(self, request):\n # if the site is disabled, then it's like it's invisible, so it's not a match for this site\n if (not self.get_isenabled()):\n return False\n\n # if the request does not match site prefix, then it's not a match for this site\n if (not self.does_request_match_siteprefix(request)):\n return False;\n\n # ok, looks like it was meant for us\n\n # before we start a request we might have stuff to do\n self.process_request_starts(request)\n\n # log it\n #self.logevent(EInfo(\"Request URL: {0} from {1}.\".format(request.get_fullurlpath_original(), request.get_remote_addr())),request=request)\n self.logevent(EInfo(\"Request URL: {0} from {1}.\".format(request.get_fullurlpath_original(), request.get_remote_addr())))\n\n # handle the request\n ishandled = self.comp('routemanager').process_request(self, request)\n\n # after we end a request we might have stuff to do (this might include, for example, flushing the database)\n self.process_request_ends(request, ishandled)\n\n # return whether we handled it\n return ishandled", "def _is_link_allowed(self, link):\n denied = [re.match(r, link) for r in self.crawl_rules_deny]\n denied = [x for x in denied if x is not None]\n\n crawl_rules_allow = self.crawl_rules_allow\n if not self.crawl_rules_allow:\n crawl_rules_allow = (\".*\",)\n \n allowed = [re.match(r, link) for r in crawl_rules_allow]\n allowed = [x for x in allowed if x is not None]\n\n return not 
bool(denied) and bool(allowed)", "def should_process_request(self, request):\r\n path = request.META['PATH_INFO']\r\n\r\n ignored_url_patterns = getattr(settings, 'TRACKING_IGNORE_URL_PATTERNS', [])\r\n for pattern in ignored_url_patterns:\r\n # Note we are explicitly relying on python's internal caching of\r\n # compiled regular expressions here.\r\n if re.match(pattern, path):\r\n return False\r\n return True", "def contains_redirect(content, _url):\n frame_url = contains_frame_redirect(content) if (len(content) < 10000000) else False\n if frame_url:\n debug(\"frame_url: {}\".format(frame_url))\n return frame_url\n\n meta_redir = contains_special_redirect(content, _url)\n if meta_redir:\n debug(\"metaredir: {}\".format(meta_redir))\n return meta_redir\n\n return False", "def url_check(url):\n try:\n request = urllib.request.Request(url)\n request.get_method = lambda: 'HEAD'\n urllib.request.urlopen(request)\n return True\n \n except ValueError:\n return False\n\n except urllib.request.HTTPError:\n return False\n \n except URLError:\n return False", "def link_checker(start_url, reg_ex, robots_url=None, user_agent=\"wswp\",delay=4, max_depth=2, scrape_callback=None):\r\n # create list for links which match our reg_ex\r\n page_queue = [start_url]\r\n # keep tracking links already seen in dict\r\n # web_pages are connected themselves\r\n seen = {}\r\n data = []\r\n\r\n # check if robots.txt exist\r\n if not robots_url:\r\n robots_url = \"{}/robots.txt\".format(start_url)\r\n # get robots txt\r\n rob_p = robots_parser(robots_url)\r\n timer_r = Tajmer(delay)\r\n #throttle = Throttle(delay)\r\n # loop links\r\n while page_queue:\r\n # get and then remove last element of list\r\n url = page_queue.pop()\r\n # check if passes web_page restrictions\r\n if rob_p.can_fetch(user_agent,url):\r\n # check url depth\r\n depth = seen.get(url, 0)\r\n # if max depth skip\r\n if depth == max_depth:\r\n print(\"Page skipped due to depth: \", url)\r\n continue\r\n\r\n timer_r.wait(url)\r\n #throttle.wait(url)\r\n # open page\r\n html = get_page(url, user_agent=user_agent)\r\n # if unable to open skip\r\n if not html:\r\n continue\r\n if scrape_callback:\r\n data.extend(scrape_callback(url, html) or [])\r\n # scrap data here\r\n\r\n # filter links matching regex\r\n # use link_finder function to get all links from html\r\n for link in link_finder(html):\r\n # check if matches reg_ex\r\n if re.match(reg_ex, link):\r\n # create absolute link start url + link\r\n abs_link = urljoin(start_url, link)\r\n # add to seen dictionary if first time opened\r\n # add to link list\r\n if abs_link not in seen:\r\n seen[abs_link] = depth + 1\r\n page_queue.append(abs_link)\r\n else:\r\n print(\"Blocked by robots.txt: \", url)", "def is_valid(url):\n\n HAVERFORD_TOKEN = 'Haverford users only'\n INVALID_TOKENS = [HAVERFORD_TOKEN, \"Site Intel\", \"SITE Institute\"]\n content = urlopen(url).read()\n\n for token in INVALID_TOKENS:\n if token in content:\n return False\n return True", "def test_url(quartus, part, url):\n print(\"\\rChecking %s/%s \" % (quartus, part), end='')\n try:\n response = urllib.request.urlopen(url)\n headers = response.getheaders()\n return True\n except KeyboardInterrupt:\n sys.exit(1)\n except:\n return False", "def crawl(self):\n self._process_urls()\n self._process_feeds()\n self._downloader.process_all()\n return self._feeds is not None", "def check_url(url=None, parse_url=None):\n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n unsupported = ['twitcam.', 'new.']\n return 
parse_url.netloc.endswith('livestream.com')\\\n and not any(x in parse_url.netloc for x in unsupported)\\\n and len(parse_url.path.split('/')) > 2", "def get_check_url(self,url):\n r = requests.get(url).status_code\n if r==requests.codes.ok:\n return(True)\n else:\n print \"something wrong! status_code: \" + r\n return(False)", "async def has_url(self, url: StrOrURL) -> bool:\n key = self.create_key('GET', url)\n return await self.responses.contains(str(key)) or await self.redirects.contains(str(key))", "def checkURL(cls, trust_root, url):\n tr = cls.parse(trust_root)\n return tr is not None and tr.validateURL(url)", "def ask_robots(url: str, useragent: str) -> bool:\n\n url_struct = urlparse(url)\n base = url_struct.netloc\n if base not in _rp:\n _rp[base] = RobotFileParser()\n _rp[base].set_url(url_struct.scheme + \"://\" + base + \"/robots.txt\")\n _rp[base].read()\n return _rp[base].can_fetch(useragent, url)", "def is_url(self, url):\n return self.is_regex_url(url, self.is_url_regex)", "def _can_ping_url(self, url, headers):\n try:\n self.http_request(url, \"GET\", \"\", headers, timeout=.75)\n return True\n except:\n return False", "def ssi_url(self, url):\n return self.is_regex_url(url, self.is_ssi_regex)", "def ApplyRule(self, return_value, request, response):\n del response # unused.\n url = '%s%s' % (request.host, request.full_path)\n if not self._url_re.match(url):\n return False, return_value\n\n logging.debug('url: %s', url)\n return self._stop, True", "def check_url(url=None, parse_url=None):\n \n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n return parse_url.netloc.endswith('bambuser.com')\\\n and bool(re.search('^\\/(v|broadcast)\\/\\d+(\\.live)?$', parse_url.path))", "def matches(self, url):\n split = urlparse.urlsplit(url)\n return self.host == split.hostname", "def isUrlValid(self, url):\n if url is None:\n return False\n elif url.startswith('//'):\n return False\n elif ':' in url:\n return False\n elif url.startswith('/wiki'):\n return True\n elif 'en.wikipedia.org/wiki/' not in url:\n return False\n return True", "def is_url_requirement(ireq):\n return bool(ireq.original_link)", "def check_url(url=None, parse_url=None):\n \n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n return parse_url.netloc.endswith('slideshare.net')", "def check_if_not_visited(url):\n return (url not in VISITED_LINKS)", "def is_page(self, url):\n netloc = urlparse.urlparse(url).netloc.lower()\n return any(map(lambda domain: netloc.endswith(domain), self.allowed_domains))", "def check_url_format(self):\r\n m = re.match(\"^http://www.tesco.com/direct/[0-9a-zA-Z-]+/[0-9-]+\\.prd$\", self.product_page_url)\r\n n = re.match(\"^http://www.tesco.com/.*$\", self.product_page_url)\r\n return (not not m) or (not not n)", "def process_url(self, url, info):\n\t\tret_val = False # Default to 'False', meaning no file was located by a handler.\n\t\tfor h in self.handlers:\n\t\t\tprint(\"\\tChecking handler: %s\" % h.tag)\n\t\t\tret = h.handle(url, info)\n\t\t\tif ret is None:\n\t\t\t\t# None is returned when the handler specifically wants this URL to be \"finished\", but not added to the files list.\n\t\t\t\tstringutil.print_color(Fore.GREEN, \"\\t+Handler '%s' completed correctly, but returned no files!\" % h.tag )\n\t\t\t\tret_val = None\n\t\t\t\tbreak\n\t\t\tif ret:\n\t\t\t\t# The handler will return a file/directory name if it worked properly.\n\t\t\t\tret_val = stringutil.normalize_file(ret)\n\t\t\t\tstringutil.out(\"%s\\t+Handler '%s' completed correctly! 
%s%s\" % (Fore.GREEN, h.tag, stringutil.fit(ret_val, 75), Style.RESET_ALL) )\n\t\t\t\tbreak\n\t\t\t#\n\t\t#\n\t\tif ret_val is False:\n\t\t\tstringutil.error(\"\\t!No handlers were able to accept this URL.\" )\n\t\treturn ret_val", "def check_url(url):\n return 'products.json' in url", "def unable_to_crawl(self) -> bool:\n return pulumi.get(self, \"unable_to_crawl\")", "def crawl_new_url(self):\n url_returned = self.obj_scheduler.get_next_url()\n \n if self.obj_scheduler.can_fetch_page(url_returned[0]):\n return None\n else:\n binary_content = self.request_url(url_returned[0])\n \n if binary_content != None:\n return self.discover_links(url_returned[0], url_returned[1], binary_content)\n else:\n return None", "def test_url():\r\n global provided_url\r\n global verbose_flag\r\n # extracting url\r\n provided_url = urlparse(provided_url).scheme+\"://\"+urlparse(provided_url).netloc\r\n print provided_url \r\n if verbose_flag: print \"\\t[.] Checking if connection can be established...\",# + provided_url\r\n try:\r\n response = urllib2.urlopen(provided_url)\r\n \r\n except HTTPError, e:\r\n if verbose_flag: print \"[!] Failed\"\r\n return 0\r\n except URLError, e:\r\n if verbose_flag: print \"[!] Failed\"\r\n return 0\r\n else:\r\n valid_target = 1\r\n if verbose_flag: print \"Success\"\r\n return 1", "def process_url(self, url, html):\n html = self.getHtmlContent(html, 'content')\n new_urls = self.getValidUrlsFromHtml(html)\n return new_urls", "def test_url_pattern(self):\n\t\turl = URLFilter()\n\t\turl.set_limit(\"goog*\")\n\t\tself.assertTrue(url.check(Object(get_urls=lambda: ['google.com'])))", "def _api_call(self, url, response_checker):\n self.request_compare(url)", "def check_url(url: str) -> bool:\n try:\n potential_error = driver.find_element_by_xpath(\"/html/body/div[5]/div/div/div[1]/div/div/div/section/div[2]/div\").text\n if '403' in potential_error:\n return True\n except:\n return False", "def url_checker(url):\n if url.startswith(http_req):\n url_name = url[7:]\n # print('URL check passed. Using http')\n return url_name\n if url.startswith(https_req):\n url_name = url[8:]\n # print('URL check passed. Using https')\n return url_name\n else:\n print('URL check failed. 
not valid http or https URL')\n print(f'Bad URL:{url}')\n sys.exit()\n # return False", "def is_goal(self, url):\n return url == self.goal_url", "def check_url(url):\n # see also http://stackoverflow.com/questions/2924422\n good_codes = [httplib.OK, httplib.FOUND, httplib.MOVED_PERMANENTLY]\n return get_server_status_code(url) in good_codes", "def is_url(url):\n\n return bool(re.match(re_url, url))", "def shouldSkipUrl(self, url, data):\n return url in (\n # video\n self.stripUrl % '1880',\n self.stripUrl % '1669',\n )", "def check_url_format(self):\n\n m = re.match(r\"^http://www\\.flipkart\\.com/.*/p/.*$\", self.product_page_url)\n\n return not not m", "def validate_url(url):\n response, content = get_response_from_file(url)\n\n if response == None and content == None:\n response, content = get_response_and_content(url)\n\n if response == None:\n return url, url, 0, \"\", \"N\", \"N\", \"N\", hit(\"No Response\"), \"false\"\n else:\n #print(url, get_visible_text(content))\n return evaluate_content_for_200s(response, url, content)", "def test_is_url(self):\n\n url = \"https://shadowrun.needs.management\"\n self.assertTrue(run(verification.is_url(url)))\n\n url = \"https:// www.google.com\"\n self.assertFalse(run(verification.is_url(url)))", "def query(url):", "def is_allowed(self, user_agent, url, syntax=GYM2008): \n if PY_MAJOR_VERSION < 3:\n # The robot rules are stored internally as Unicode. The two lines \n # below ensure that the parameters passed to this function are \n # also Unicode. If those lines were not present and the caller \n # passed a non-Unicode user agent or URL string to this function,\n # Python would silently convert it to Unicode before comparing it\n # to the robot rules. Such conversions use the default encoding \n # (usually US-ASCII) and if the string couldn't be converted using\n # that encoding, Python would raise a UnicodeError later on in the\n # guts of this code which would be confusing. \n # Converting the strings to Unicode here doesn't make the problem\n # go away but it does make the conversion explicit so that \n # failures are easier to understand. 
\n if not isinstance(user_agent, unicode):\n user_agent = user_agent.decode()\n if not isinstance(url, unicode):\n url = url.decode()\n \n if syntax not in (MK1996, GYM2008):\n _raise_error(ValueError, \"Syntax must be MK1996 or GYM2008\")\n \n for ruleset in self.__rulesets:\n if ruleset.does_user_agent_match(user_agent):\n return ruleset.is_url_allowed(url, syntax)\n \n return True", "def check_url(url=None, parse_url=None):\n \n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n invalid_paths = ['^\\/?$', '^\\/(stream|explore|groups|upload|you|dashboard|messages|settings|creativecommons|tracks|people)(\\/|$)']\n \n return parse_url.netloc in ['soundcloud.com', 'www.soundcloud.com', 'm.soundcloud.com']\\\n and not any(re.search(invalid_path, parse_url.path) for invalid_path in invalid_paths)", "def match(self, url):\n if self.is_global:\n return True\n\n # For easy comparison, we strip leading and trailing slashes,\n # and then split both self.url and the supplied URL on\n # slashes, to get two lists of path components we can compare.\n self_bits = self.url.strip(\"/\").split(\"/\")\n url_bits = url.strip(\"/\").split(\"/\")\n\n # If self.url produced a longer list of path components than\n # the supplied URL, it can't be a match.\n if len(self_bits) > len(url_bits):\n return False\n\n return self_bits == url_bits[: len(self_bits)]", "def validaURL(url: AnyStr) -> bool:\n\n return re.compile(patternURL).search(url) != None # Linea 1", "def is_valid_url(url: str) -> bool:\n try:\n requests.get(url)\n except requests.exceptions.RequestException:\n return False\n return True", "def _validate_url(url):\n if not url or url.count('/') != 1 or url[0] != '@':\n return False\n return True", "def check_url(url):\n good_codes = [httplib.OK, httplib.FOUND, httplib.MOVED_PERMANENTLY]\n return get_server_status_code(url) in good_codes", "def is_tiny(self, url):\n return urlparse.urlsplit(url).netloc in self.get_services()", "def updateURLs(self, tree):\n urls = set()\n #Remove all links we have already visited\n for link in tree.findall(\".//a\"):\n try:\n url = urllib.parse.urldefrag(link.attrib['href'])[0]\n if (url and url not in self.unvisitedURLs and url\n not in self.visitedURLs):\n urls.add(url)\n except KeyError:\n pass\n\n #Remove all non-http URLs and a dd a sutiable base URL where that is\n #missing\n newUrls = set()\n for url in urls:\n splitURL = list(urllib.parse.urlsplit(url))\n if splitURL[0] != \"http\":\n continue\n if splitURL[1] == \"\":\n splitURL[1] = urllib.parse.urlsplit(self.currentURL)[1]\n newUrls.add(urllib.parse.urlunsplit(splitURL))\n urls = newUrls\n\n responseHeaders = {}\n #Now we want to find the content types of the links we haven't visited\n for url in urls:\n try:\n resp, content = self.http.request(url, \"HEAD\")\n responseHeaders[url] = resp\n except AttributeError as KeyError:\n #Don't know why this happens\n pass\n\n\n #Remove links not of content-type html or pages not found\n #XXX - need to deal with other status codes?\n toVisit = set([url for url in urls if url in responseHeaders and\n \"html\" in responseHeaders[url]['content-type'] and\n responseHeaders[url]['status'] == \"200\"])\n\n #Now check we are allowed to spider the page\n for url in toVisit:\n robotURL = list(urllib.parse.urlsplit(url)[:2])\n robotURL.extend([\"robots.txt\", \"\", \"\"])\n robotURL = urllib.parse.urlunsplit(robotURL)\n self.robotParser.set_url(robotURL)\n if not self.robotParser.can_fetch(\"*\", url):\n toVisit.remove(url)\n\n self.visitedURLs.update(urls)\n 
self.unvisitedURLs.update(toVisit)", "def check_url_availability(url):\n\n response = website_alive.get_response_object(url)\n return response.status_code == requests.codes['ok']", "def check_url(url=None, parse_url=None):\n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n return (parse_url.netloc == 'vine.co' or parse_url.netloc.endswith('.vine.co')) \\\n and re.search('/v/\\w', parse_url.path) is not None", "def is_safe_url(target: str) -> bool:\n ref_url = urlparse(request.host_url)\n test_url = urlparse(urljoin(request.host_url, target))\n return test_url.scheme in ('http', 'https') and ref_url.netloc == test_url.netloc", "def __check_lazy(self, url: str, json_file: str):\n\n parsed_url = urlparse(url)\n\n netloc = parsed_url.netloc\n\n with open(json_file) as lazy_sites:\n data = json.load(lazy_sites)\n \n for site in data['lazy_sites']:\n match = re.match(site['domain'], netloc)\n\n if match:\n return True\n \n return False", "def check_link_in(self, url):\n url_hash = tools.url_hash(url)\n if url_hash not in self.__links:\n self.__link_lock.acquire()\n self.__links.add(url_hash)\n self.__link_lock.release()\n return False\n else:\n return True", "def senior_url_pattern_filter(self, url):\n for src_pat, dir_pat in self._senior_url_pattern.iteritems():\n if src_pat.match(self._task_url) and dir_pat.match(url):\n return True\n return False", "def is_accessible(url: str) -> bool:\n try:\n return requests.get(url).status_code == requests.codes.ok\n except Exception:\n return False", "def is_safe_url(target):\n ref_url = urlparse(request.host_url)\n test_url = urlparse(urljoin(request.host_url, target))\n return test_url.scheme in ('http', 'https') and \\\n ref_url.netloc == test_url.netloc", "def check_if_exist(self,url):\r\n\t\t\"\"\" verefier si un lien existe \"\"\"\r\n\t\trequest = mechanize.Request(url)\r\n\t\tBAD_REQ = [400,401,404]\r\n\t\ttry :\r\n\t\t\tresponse = mechanize.urlopen(request)\r\n\t\t\tif response.code in BAD_REQ:\r\n\t\t\t\treturn False\r\n\t\t\telse:\r\n\t\t\t\treturn True\r\n\t\texcept urllib2.HTTPError, error:\r\n\t\t\tif error.code in BAD_REQ:\r\n\t\t\t\treturn False\r\n\t\t\telse:\r\n\t\t\t\treturn True", "def check_heartbeat(self):\n try:\n req = request(self.values['url'].data)\n response = urllib.urlopen(req)\n the_page = response.read()\n return True\n except urllib.HTTPError as e:\n if e.code == 400:\n return True\n else:\n logger.exception('[%s] - Exception when checking heartbeat')\n return False\n except Exception:\n logger.exception('[%s] - Exception when checking heartbeat')\n return False", "def check_url(url_link):\n res = requests.get(url_link, allow_redirects =True)\n if res.status_code == 200:\n print('valid URL \\n')\n return url_link\n else:\n print('Oupps there is something wrong with your URL. Run the program again!! 
')\n return res.status_code", "def __verify(self, href):\n # change main url to avoid mistakes with http ou https\n main = self.main_url.replace('https://', '').replace('http://', '')\n forbiden = {\"#\", 'None'} # forbidden possible urls\n if (href is None) or (href in forbiden):\n return False\n for item in ['tel:', 'mailto:', 'javascript:']:\n if item in href: # verify if is a link to telephone, e-mail or javascript\n return False\n if main in href and (\"/checkout/cart/add\" in href or \"/checkout/#/cart\" in href):\n return False # prevents a purchase from being made\n elif main in href or (main not in href and href[:4] != \"http\"):\n return True # possible case of a valid link\n else:\n return False # any other link is not valid", "def check_url(url=None, parse_url=None):\n \n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n return (parse_url.netloc == 'vimeo.com' or parse_url.netloc.endswith('.vimeo.com')) and 'hubnut/album/' not in parse_url.path", "def crawl(url):\n\n if not url in visited_urls:\n try:\n r = sess.get(url, headers={'User-Agent': user_agent}, stream=True)\n visited_urls.add(url)\n # Don't download non html files\n if r.headers['content-type'].startswith(\"text/html\"):\n # TODO: Use console logger\n print url, datetime.datetime.now()\n links = extract_links(r.text.encode(\"utf-8\"))\n update_queue(url, links)\n return r.text.encode('utf-8', 'ignore')\n # what if url is email address\n except requests.exceptions.MissingSchema, e:\n print(e)\n return \"\"\n except requests.ConnectionError, e:\n # Any requests exception, log and don't quit.\n print(e)\n logger.error(e)" ]
[ "0.73433197", "0.6720704", "0.66891474", "0.6490422", "0.647403", "0.6457199", "0.6456133", "0.6403348", "0.6397142", "0.636222", "0.6355213", "0.6349857", "0.6321741", "0.630184", "0.62799656", "0.6256721", "0.62470055", "0.6201895", "0.61920714", "0.61752903", "0.6173302", "0.6135701", "0.61328715", "0.6121867", "0.6116078", "0.60691535", "0.60603833", "0.60395396", "0.60009456", "0.5975256", "0.5962593", "0.59582555", "0.5947889", "0.5943238", "0.5930825", "0.5903612", "0.5902718", "0.58960295", "0.58843356", "0.58827686", "0.5872743", "0.5867929", "0.5857188", "0.5856685", "0.5854962", "0.5850448", "0.5843178", "0.5841918", "0.58294463", "0.5826025", "0.58257353", "0.58245116", "0.58211476", "0.581499", "0.5802479", "0.5796005", "0.57683724", "0.5767495", "0.57603234", "0.5758469", "0.5753327", "0.5746315", "0.5738517", "0.5737337", "0.57356244", "0.57251954", "0.5716192", "0.5716138", "0.57033664", "0.5687752", "0.5687628", "0.56823444", "0.5677273", "0.5675485", "0.5667639", "0.56604177", "0.56595594", "0.56558937", "0.56461376", "0.56449527", "0.5642104", "0.5639807", "0.56373394", "0.5631608", "0.5629605", "0.5627345", "0.5615129", "0.5612932", "0.56077725", "0.5602856", "0.56026745", "0.56000507", "0.5597259", "0.5594821", "0.558968", "0.5581816", "0.55686164", "0.55670893", "0.5564919", "0.5563946" ]
0.619864
18
Fetch and return the raw HTML for this url. The return content is a unicode string.
def fetch(self, url):
    self.log.info("Fetching URL: " + url)
    r = requests.get(url, verify=False)
    # raise an HTTPError on badness
    r.raise_for_status()
    # this decodes r.content using a guessed encoding
    return r.text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_raw_html(self):\n buffer = BytesIO()\n c = pycurl.Curl()\n c.setopt(c.URL, self.url)\n c.setopt(c.WRITEDATA, buffer)\n c.perform()\n c.close()\n return buffer.getvalue()", "def getHtml(self, url):\n r = requests.get(url)\n html = r.content\n return html", "def get_html(url):\n response = requests.get(url)\n response.encoding = 'utf-8'\n return response.text", "def getHtml(url):\n return urlopen(url)", "def get_html(url: str) -> str:\n headers = {\n 'User-Agent': Config.Scraper.user_agent,\n }\n logging.debug('User-Agent: ' + headers['User-Agent'])\n r = requests.get(url.strip(), headers=headers)\n r.encoding = 'utf8'\n print('[Status Code: %s]' % r.status_code)\n if r.status_code != 200:\n raise Exception('Error in get HTML!')\n return r.text", "def get_html(url):\n return urllib.request.urlopen(url)", "def getHTML(url): \n return urlopen(url)", "def _get_page_html(url: str, data: dict=None, headers: dict=None) -> str:\n response = requests.post(url, data=data, headers=headers) # TODO use a connection pool (single threading it like this is monumentally inefficient)\n content = response.content\n decoded = content.decode('utf-8') #TODO this is an assumption. we should probably get the charset header and use that\n return decoded", "def html(self) -> str:\n if self.html_file:\n with open(self.html_file, \"r\") as f:\n return f.read()\n else:\n try:\n return get(self.url)\n except HTTPError as e:\n if e.code == 404:\n raise PageNotFoundException(\n e.code,\n f\"Object {self.id} not found. Check that the id is correct.\",\n )\n return \"\"", "async def fetch_html(url: str,\n session: aiohttp.ClientSession,\n **kwargs) -> str:\n\n resp = await session.request(method=\"GET\", url=url, **kwargs)\n resp.raise_for_status()\n logger.info(\"Got response [%s] for URL: %s\", resp.status, url)\n html = await resp.text()\n return html", "def fetchUrl(self, url):\n self.driver.get(url)\n html = self.driver.page_source\n return html", "def get_html_content():\n \n request = urllib2.Request(RBI_URL, headers=HEADERS)\n page = urllib2.urlopen(request)\n html_content = page.read()\n return html_content", "def get_content_from_ulr(self):\n response = urllib.request.urlopen(self.url)\n if response.getcode() != 200:\n self.logger.info(\"Cisco - get_content_from_url()\")\n raise ConnectionError('Unable to load ', self.url)\n content = response.read()\n response.close()\n return content", "def get_html_from_url(url):\n request = requests.get(url)\n data = request.text\n return data", "def getHTML(self):\n html = requests.get(self.URL).text\n soup = BeautifulSoup(html, \"lxml\")\n return soup", "def get_html_content(self, url):\n\n req = urllib2.Request(url, headers=self.HEADER)\n page = urllib2.urlopen(req)\n soup = BeautifulSoup(page)\n\n return soup", "def retrieve_html(url):\n req = urllib2.Request(url)\n req.add_header('User-Agent', 'Just-Crawling 0.1')\n request = None\n status = 0\n try:\n logger.info(\"Crawling %s\" % url)\n request = urllib2.urlopen(req)\n except urllib2.URLError as e:\n logger.error(\"Exception at url: %s\\n%s\" % (url, e))\n except urllib2.HTTPError as e:\n status = e.code\n except:\n return\n if status == 0:\n status = 200\n\n try:\n data = request.read()\n except:\n return\n\n return str(data)", "def get_html(url):\n print('fetching', url)\n try:\n re = requests.get(url, timeout=1, stream=True)\n print('success!')\n # limit file size to 1mb\n html = re.raw.read(1000000+1, decode_content=True)\n if len(html) > 1000000:\n raise ValueError('response too large')\n return html\n 
except:\n raise TimeoutError('request timed out')", "def _extract_html(self, url):\n self.response = requests.get(url, timeout=5)\n self.html = BeautifulSoup(self.response.content, \"lxml\") if self.response.ok else None\n # return self.html", "def __download_web(self):\n page = requests.get(self.url)\n\n if page.status_code == 200:\n return BeautifulSoup(page.content, \"html.parser\")", "def getHTMLText(url):\n try:\n r = requests.get(url, timeout = 300)\n r.raise_for_status()\n r.encoding = r.apparent_encoding\n return r.text\n except:\n return \"\"", "def fetch_url(url):\n try:\n soup = bs(urlopen(url).read(), 'html.parser')\n return soup\n except:\n print \"Couldnot download the content from the URL\", url\n return \"\"", "def _get_text(self, remove_newlines=True):\n if not self.text:\n url = self.url\n try:\n self.log.debug(\"Try to get content from page {}\".format(url))\n r = requests.get(url)\n except requests.exceptions.RequestException as e:\n self.log.warn(\"Unable to get page content of the url: {url}. \"\n \"The reason: {exc!r}\".format(url=url, exc=e))\n raise ParsingError(e.strerror)\n\n ud = UnicodeDammit(r.content, is_html=True)\n\n enc = ud.original_encoding.lower()\n declared_enc = ud.declared_html_encoding\n if declared_enc:\n declared_enc = declared_enc.lower()\n # possible misregocnition of an encoding\n if (declared_enc and enc != declared_enc):\n detect_dict = chardet.detect(r.content)\n det_conf = detect_dict[\"confidence\"]\n det_enc = detect_dict[\"encoding\"].lower()\n if enc == det_enc and det_conf < THRESHOLD_OF_CHARDETECT:\n enc = declared_enc\n # if page contains any characters that differ from the main\n # encoding we will ignore them\n content = r.content.decode(enc, \"ignore\").encode(enc)\n htmlparser = etree.HTMLParser(encoding=enc)\n root = etree.HTML(content, parser=htmlparser)\n etree.strip_elements(root, html.etree.Comment, \"script\", \"style\")\n text = html.tostring(root, method=\"text\", encoding=\"unicode\")\n\n if remove_newlines:\n self.log.debug(str(type(text)))\n text = re.sub('\\s+', ' ', text)\n self.text = text\n\n return self.text", "def GetContent(self):\n random_header = random.choice(self.my_headers)\n req = urllib2.Request(self.url)\n req.add_header(\"User-Agent\", random_header)\n req.add_header(\"Host\", self.host)\n req.add_header(\"Referer\", self.referer)\n req.add_header(\"GET\", self.url)\n try:\n html = urllib2.urlopen(req)\n content = html.read()\n# print html.info()\n html.close()\n return content\n except:\n print \"Something wrong is happening in get_content!\"", "def get_content(self):\n response = requests.get(self.url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n return soup", "def getHtml(_url):\n try:\n logger.info('getHtml: Requesting: %s' % _url)\n\n response = urllib2.urlopen(_url)\n\n #download data\n html_ = response.read()\n logger.debug('getHtml: Retrieved data: %s' % html_)\n\n return html_\n\n except urllib2.HTTPError, e:\n logger.error('getHtml: HTTPError: ' + str(e.code))\n\n except urllib2.URLError, e:\n logger.error('getHtml: URLError: ' + str(e.reason))\n\n except httplib.HTTPException, e:\n logger.error('getHtml: HTTPException: ', str(e))\n\n except Exception:\n logger.exception('getHtml: Unhandled exception: ')", "def raw_html(self):\n if self._html:\n return self._html\n else:\n return lxml.html.tostring(self.element, encoding=self.encoding)", "def get_webpage_content(url):\n request = urllib2.Request(url)\n page = urllib2.urlopen(request)\n soup = BeautifulSoup(page.read())\n return 
unicode(soup)", "def html(self):\n if self._html:\n if not self._useDefaultDecoder:\n LOGGER.info(\"Using Forced Encoding %r on raw_html!\" % self.encoding)\n return self.raw_html.decode(self.encoding, errors='xmlcharrefreplace')\n else:\n LOGGER.info(\"Using default Codec on raw_html!\")\n return self.decode_html(self.raw_html)\n else:\n return lxml.html.tostring(self.element, encoding='unicode').strip()", "def retrieve_content(self, url):\n page = requests.get(url)\n content = page.content\n return content", "def download_simple(url): # url(str)\n html = urlopen(url).read().decode()\n return html", "def fetch_document(self, url: str) -> bytes:\n self.html_document = b''\n try:\n response = requests.get(url, headers=self.headers)\n response.raise_for_status()\n self.html_document = response.content\n logger.info('web page {0} fetched with status code: {1}'.format(url, response.status_code))\n return self.html_document\n except requests.exceptions.RequestException:\n logger.exception('Exception raised in Scraper.fetch_document()')\n raise", "def html(self):\n return self._html", "def download_html(url: str):\n response = urllib.request.urlopen(url)\n return response.read()", "def get_html(url):\n\n r = requests.get(url, headers={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'\n })\n html = r.text\n\n return html", "def fetch(self, url: furl) -> str:\n try:\n contents = self._download(url)\n except requests.ConnectionError as err:\n logger.exception(f\"Request failed with {err}\")\n click.secho(\n f\"The URL {url} could not be downloaded. Either your network is unreachable or the URL is broken.\"\n f\" Check the URL, fix your connection, or use \"\n f\" {OptionEnum.OFFLINE.as_flake8_flag()} / {OptionEnum.OFFLINE.as_envvar()}=1\",\n fg=\"red\",\n err=True,\n )\n return \"\"\n return contents", "def _fetch(url, ssl_verify = True):\n req = Request(url)\n if ssl_verify:\n page = urlopen(req)\n else:\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n\n page = urlopen(req, context=ctx)\n content = page.read().decode('utf-8')\n page.close()\n return content", "async def http_get(self, url, ignore_errors=False):\n self.logger.debug(\"HTTP GET %s\", url)\n code, header, body = await fetch(\n url,\n request_timeout=config.activator.http_request_timeout,\n follow_redirects=True,\n validate_cert=config.activator.http_validate_cert,\n eof_mark=b\"</html>\",\n )\n if 200 <= code <= 299:\n return smart_text(body, errors=\"replace\")\n elif ignore_errors:\n metrics[\"error\", (\"type\", f\"http_error_{code}\")] += 1\n self.logger.debug(\"HTTP GET %s failed: %s %s\", url, code, body)\n return smart_text(header, errors=\"replace\") + smart_text(body, errors=\"replace\")\n else:\n metrics[\"error\", (\"type\", f\"http_error_{code}\")] += 1\n self.logger.debug(\"HTTP GET %s failed: %s %s\", url, code, body)\n return None", "def get_html(self):\r\n pass", "def rawHTML(self):\n #TODO : do checking for scripts and hacks here?\n return mark_safe(self.html)", "def get_html(self, path, data=None, follow=False, **extra):\r\n return self.get(path, data or {}, follow, HTTP_ACCEPT=\"text/html\", **extra)", "def rawHTMLrendered(self):", "def get_raw_data(self):\n return self.HTML", "def get_html(url):\n req = urllib.request.Request(\n url,\n headers={\n 'User-Agent': 'Python Learning Program',\n 'From': 'hklee310@gmail.com'\n }\n )\n resp = urllib.request.urlopen(req)\n\n if 
resp.code == 200:\n return resp.read() # returns the html document\n else:\n return None", "def getHtml(self):\n return self.html", "def load_page(url: str) -> str:\n try:\n response = urlopen(url)\n\n if response.status == 200:\n body_text = str(response.read())\n return body_text\n return \"\"\n except URLError:\n return \"\"", "def get_page_html(url: str) -> Union[int, str]:\n req = requests.get(url=url)\n if req.status_code == 200:\n return req.text\n raise requests.exceptions.RequestException('')", "def get_html(self):\r\n return u'This is supposed to be test html.'", "def _html(url: str) -> BeautifulSoup:\n with urllib3.PoolManager() as manager:\n res = manager.request(\"GET\", url, headers={\"User-Agent\": ua.chrome})\n if res.status != 200:\n raise Exception(res.status)\n soup = BeautifulSoup(res.data, \"html.parser\")\n return soup", "async def fetch_page(self, url: str) -> PageRaw:\n\n raise NotImplementedError()", "def read_html(url: str) -> BeautifulSoup:\n try:\n response = requests.get(url, stream=True)\n status_code = response.status_code\n content_type = response.headers[\"Content-Type\"].lower()\n except requests.RequestException as e:\n raise RuntimeError(f\"Error during requests to {url} : {str(e)}\")\n else:\n if (\n status_code == 200\n and content_type is not None\n and content_type.find(\"html\") > -1\n ):\n return BeautifulSoup(response.content, \"html.parser\")", "def getHtml(url):\n log.finer(\" Opening URL: %s\" % url)\n handle = MozURLopener().open(url)\n html = handle.read()\n handle.close()\n return html", "def get_raw_data(url):\n\n req = requests.get(url, stream=True)\n req.raw.decode_content = True\n return req.raw", "def html(self) -> str:\n return self._html", "def get_intel_html(url):\r\n USER_AGENT = (\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36\")\r\n headers = {\r\n 'User-Agent': USER_AGENT,\r\n }\r\n r = requests.get(url, headers=headers)\r\n return r.text", "async def html(\n self, *, encoding: Optional[str] = None, errors: str = \"strict\") -> str:\n return await self._aws_text(encoding=encoding, errors=errors)", "def get_html(self):\n if not self.id_video or not self.original_url or not self.xml_response:\n return ''\n return self.xml_response.find('html').text", "def fetch(url):\n content = requests.get(url).text\n if \"Error\" in content:\n raise ValueError(f\"Cannot read from: {url}\")\n return content", "def get_html(self, *args, **kwargs):\n return Text(self.get_data(*args, **kwargs), escape=False)", "def convert_content(self, html):\n\n try:\n dom = BeautifulSoup(html, 'html.parser')\n return self.parse_content(dom)\n except:\n return html", "def http_get_contents(url) -> str:\n\n # Clean url\n url = str(url).strip('\\\\')\n url = str(url).strip('\\n')\n\n try:\n # Fixed SSL bug on MacOS: /Applications/Python\\ 3.8/Install\\ Certificates.command\n http = urllib3.PoolManager()\n http_response = http.request('GET', url, timeout=5)\n http_response_content = http_response.data\n\n if http_response.status == 200:\n return http_response_content.decode('utf-8')\n\n return ''\n\n # pylint: disable=W0703\n except Exception as error:\n # pylint: disable=W1202\n LOGGER.error('Error. Could not connect to: {0}. 
Error message: {1}'.format(url, error))\n\n return ''", "def load_page(url):\n try:\n response = urllib2.urlopen(url)\n html = response.read()\n\n if response.code == 200:\n body_text = html\n return html\n return \"\"\n except Exception:\n return \"\"", "def _soup(self, url):\n r = self.session.get(url)\n r.raise_for_status()\n html = Soup(r.text, 'lxml') # lxml is fastert than html.parser\n r.close()\n return html", "def read_url(self, url: str) -> str:\n return requests.get(url, headers=self.headers).text", "def request(self, url):\r\n\r\n req = self.get(url)\r\n soup = BeautifulSoup(req.content, \"lxml\")\r\n return soup", "def get(self, url):\n \n content = \"\"\n if hasattr(http.client, \"HTTPSConnection\"): \n url_options = urlparse(url)\n\n conn = http.client.HTTPSConnection(url_options.netloc)\n conn.request('GET', url_options.path + '?' + url_options.query)\n content = conn.getresponse().read().decode('utf-8')\n conn.close()\n else: \n p = os.popen('curl -k \"' + url + '\"')\n content = p.read()\n p.close() \n\n return content", "def get_raw_page(self, URL, force_reload=False):\n\n if not self.valid_cache(URL) or force_reload:\n\n HEAD = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:63.0) Gecko/201'}\n INFO_REQUEST = Request(URL, headers=HEAD)\n PAGE = urlopen(INFO_REQUEST).read()\n\n self.save_cache(PAGE, URL)\n\n else:\n PAGE = self.load_cache(URL)\n\n PAGE = str(PAGE, encoding='utf-8')\n\n return PAGE", "def html(self) -> SafeString:\n return format_html(self.__html__())", "def html(self) -> SafeString:\n return format_html(self.__html__())", "def _setContentFromUrl(self, url):\n urlgrabber = UrlGrabber(url)\n self._htmlContent = urlgrabber.get()", "def _get_site_html(url):\n\n hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Ge\\cko)' \n 'Chrome/23.0.1271.64 Safari/537.11',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',\n 'Accept-Encoding': 'none',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'Connection': 'keep-alive'}\n\n try:\n req = urllib2.Request(url, headers = hdr)\n source = urllib2.urlopen(req).read()\n except:\n source = []\n\n return source", "def htmlStream(self):\n return io.BytesIO(self.html)", "def load_page(self) -> bs4.BeautifulSoup:\n\n res = requests.get(self.url)\n\n res.raise_for_status()\n return bs4.BeautifulSoup(res.text, 'html.parser')", "def get_html_source(url):\n # import urllib\n try:\n sock = urllib.urlopen(url)\n html_source = sock.read()\n sock.close()\n return html_source\n except IOError:\n print \"IOError: Not a valid URL\"", "def get_page_html(self, xblock):\r\n url = xblock_studio_url(xblock)\r\n self.assertIsNotNone(url)\r\n resp = self.client.get_html(url)\r\n self.assertEqual(resp.status_code, 200)\r\n return resp.content", "def get_url(url):\r\n response = requests.get(url)\r\n content = response.content.decode(\"utf8\")\r\n return content", "def load_url_content(url):\n try:\n r = requests.get(url)\n if r.ok:\n return r.text\n else:\n return None\n except Exception:\n return None", "def fetch(url, user_agent=\"django-oembed/0.1\"):\r\n request = urllib2.Request(url)\r\n request.add_header('User-Agent', user_agent)\r\n request.add_header('Accept-Encoding', 'gzip')\r\n opener = urllib2.build_opener()\r\n f = opener.open(request)\r\n result = f.read()\r\n if f.headers.get('content-encoding', '') == 'gzip':\r\n result = gzip.GzipFile(fileobj=StringIO(result)).read()\r\n f.close()\r\n return result", "def 
fetch_content(self, url):\n # log.debug(\"Fetching content from: %s\", url)\n prepare_curl_callback = lambda x: x.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_GSSNEGOTIATE)\n self.http.fetch(url, self.handle_response, prepare_curl_callback=prepare_curl_callback, auth_username=':')", "def get_html(city):\n r = requests.get(city.data_url, headers=HEADERS)\n\n # Requests fails to correctly check the encoding for every site,\n # we're going to have to get that manually (in some cases). This sucks.\n soup = BeautifulSoup(r.text, \"html.parser\")\n meta_content = soup.find(\"meta\", {\"http-equiv\": \"content-type\"})\n if meta_content is not None:\n encoding = meta_content[\"content\"].split(\"=\")[-1]\n r.encoding = encoding\n\n return r.text", "def get_html(website_url):\n\n website_response = requests.get(website_url, headers=headers_req)\n if website_response.status_code != requests.codes.ok:\n raise SiteUnreachableException()\n return BeautifulSoup(website_response.content, 'html.parser')", "def url_fetch(self, url):\n user_agent = random.choice(self.conf.user_agents)\n if self.isCompress == True:\n headers = {\n 'Uesr-Agent': user_agent,\n \"Accept-Encoding\": \"gzip,deflate\",\n \"Accept-Charset\" : \"UTF-8,*\"\n }\n else:\n headers = {\n 'Uesr-Agent': user_agent,\n \"Accept-Charset\" : \"UTF-8,*\"\n }\n raw_data = ''\n try:\n conn = httplib.HTTPConnection(self.proxy, timeout=3.0)\n conn.request('GET', url, None, headers)\n response = conn.getresponse()\n raw_data = response.read()\n except Exception as err:\n self.logger.error('connect error[%s]' % err)\n return '999', 'Request failed', ''\n finally:\n conn.close()\n \n content = ''\n if self.isCompress == True:\n if response.status == 200:\n try:\n stream = StringIO.StringIO(raw_data)\n decompressor = gzip.GzipFile(fileobj=stream)\n content = decompressor.read()\n except:\n self.logger.error('status[%s] len_raw_data[%d]' % (response.status, len(raw_data)))\n return '998', 'content err', ''\n else:\n if response.status == 200:\n content = raw_data \n\n return response.status, response.reason, content", "def get_page(self, url):\n \"\"\" @param url: Url we want to crawl\"\"\"\n \"\"\" @type url: String \"\"\"\n \"\"\"@return the page\"\"\"\n try:\n u = urlopen(url)\n html = u.read().decode('utf-8')\n # except Exception as e:\n # logging.exception(e)\n finally:\n print(\"Closing\")\n u.close()\n return html", "def get_html_from_rst(rst):\n\n compiler = nikola.plugins.compile.rest.CompileRest()\n compiler.set_site(FakeSite())\n return compiler.compile_string(rst)[0]", "def fetch_m3u(self, m3u_url: str) -> bytes:\n m3u_content = requests.get(url=m3u_url).content\n self._logger.debug('M3U8 content: {}'.format(m3u_content))\n return m3u_content", "def raw_url(self) -> str:\n return self.url_as(raw=True)", "def encode_html(self):\n html_temp = self._html\n try:\n html_temp = html_temp.split('<div class=\"rich-text\">')[1].rsplit(\n \"</div>\", 1\n )[0]\n except IndexError:\n pass\n return b64encode(html_temp.encode(\"utf-8\")).decode(\"utf-8\")", "def get_data(self, *args, fix_links=False, **kwargs):\n response = self.get(*args, **kwargs)\n if getattr(response, \"url\", None):\n return self.get_data(response.url, fix_links=fix_links)\n\n if fix_links:\n soup = self.get_soup(*args, fix_links=True, **kwargs)\n return str(soup)\n return response.content.decode(response.charset)", "def get_text_content(url: str) -> str:\n\n url = _fix_url(url)\n\n return get(url).text", "def content(self) -> str:\n return pulumi.get(self, \"content\")", "def 
content(self) -> str:\n return pulumi.get(self, \"content\")", "def content(self) -> str:\n return pulumi.get(self, \"content\")", "def text(self) -> str:\n # Access self.encoding before self._cached_text, because\n # there is a chance self._cached_text would be already populated\n # while detecting the encoding\n encoding = self.encoding\n if self._cached_text is None:\n fake_content_type_header = f\"charset={encoding}\"\n encoding, text = html_to_unicode(fake_content_type_header, self.body)\n self._cached_text = text\n return self._cached_text", "def get_song_html(self, url):\n request = urllib.request.Request(url)\n request.add_header(\"Authorization\", \"Bearer \" + self.client_access_token)\n request.add_header(\"User-Agent\",\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'\")\n\n page = urllib.request.urlopen(request)\n html = BeautifulSoup(page, \"lxml\")\n print(\"Scraped: \" + url)\n return html", "def read_web(url):\n f = urllib.request.urlopen(url)\n contents = f.read()\n return contents", "def get_html_no_js(url):\n r = requests.get(url)\n\n # with open('res.html','w',encoding=r.encoding) as f:\n # f.write(r.text)\n\n return r.text", "def html_text(self):\n return g.markdown_wiki.convert(self.data.text)", "def get_text_hook(raw):\n soup = bs4.BeautifulSoup(quopri.decodestring(raw), features=\"lxml\")\n return soup.text", "def get_article(self, url):\n response = self.opener.open(url)\n doc = lxml.html.document_fromstring(response.read())\n content = doc.find_class(\"post\")[0] # Select content by CSS class \n cleaned_content = clean_html(content)\n str_cleaned_content = lxml.html.tostring(cleaned_content)\n # self.__save_article_to_file(str_cleaned_content)\n return str_cleaned_content", "def geturl(self) -> str:\n\n req = request.Request(url=self._url, headers=self._headers)\n with request.urlopen(req) as f:\n return f.read().decode('utf-8', 'ignore')" ]
[ "0.8074977", "0.7284636", "0.7092009", "0.7025183", "0.69793874", "0.69557333", "0.6916234", "0.68908453", "0.68863857", "0.6852996", "0.68447953", "0.68049586", "0.6689668", "0.6678838", "0.6672157", "0.66424465", "0.6611915", "0.6605699", "0.6543702", "0.65425205", "0.65123165", "0.65114564", "0.6503601", "0.6500038", "0.64915407", "0.64706653", "0.64409333", "0.6435555", "0.64343536", "0.6370093", "0.63440675", "0.63294923", "0.63181156", "0.63115627", "0.63053495", "0.62594867", "0.62401235", "0.6209968", "0.6196874", "0.6184748", "0.6177475", "0.6172736", "0.6169422", "0.6167966", "0.6165147", "0.6164422", "0.6147359", "0.61459804", "0.6145045", "0.61398786", "0.61158746", "0.6104518", "0.608641", "0.6085008", "0.6076328", "0.606708", "0.6066992", "0.6049224", "0.6039321", "0.6035962", "0.6030927", "0.6021415", "0.6013356", "0.60012835", "0.59969693", "0.59909195", "0.59835094", "0.5976562", "0.5976562", "0.59553206", "0.5949492", "0.5945289", "0.5934379", "0.5928015", "0.5906337", "0.5905982", "0.5903767", "0.5899239", "0.5899052", "0.5896059", "0.5859029", "0.5832675", "0.5823584", "0.581571", "0.5802279", "0.57977074", "0.57943034", "0.57933444", "0.5779525", "0.57753634", "0.57753634", "0.57753634", "0.5773988", "0.57573473", "0.5751551", "0.57496065", "0.57417315", "0.57266194", "0.5725056", "0.5719415" ]
document_score: 0.69644094
document_rank: 5
Extract text and other things from the raw_html for this document.
def extract(self, doc, raw_html):
    super(KenyaTodayCrawler, self).extract(doc, raw_html)
    soup = BeautifulSoup(raw_html)
    # gather title
    doc.title = soup.find(attrs={"property":"og:title"})['content']
    # gather publish date
    date = self.extract_plaintext(soup.select("main.content .entry-meta .entry-time"))
    doc.published_at = self.parse_timestamp(date)
    nodes = soup.select(".content .entry-content p")
    self.log.info(nodes)
    if len(nodes) > 1:
        doc.summary = self.extract_plaintext(nodes[0:1])
        doc.text = "\n\n".join(p.text.strip() for p in nodes[2:])
    doc.author = Author.unknown()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_all_text(self, url, html_doc):\n self.title_text = self.get_title_words(html_doc)\n self.meta_text = self.get_meta_words(html_doc)\n self.url_text = self.get_url_words(url)\n self.heading_text = self.get_heading_words(html_doc)\n self.body_text = self.get_body_words(html_doc)", "def get_text_hook(raw):\n soup = bs4.BeautifulSoup(quopri.decodestring(raw), features=\"lxml\")\n return soup.text", "def _get_text(raw_html):\n bs = BeautifulSoup(raw_html)\n text_nodes = bs.find_all(_is_text_tag)\n text_elements = [_get_child_text(node) for node in text_nodes]\n return ' '.join(chain(*chain(*text_elements)))", "def process_html(raw_html_text):\n\tbounds_list = pre_proc.get_page_bounds(raw_html_text)\n\n\tprocessed_text_html = ( pre_proc.split_spans(raw_html_text) \t| p(pre_proc.delete_non_textual_elements)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.delete_headers, bounds_list)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.delete_vertical_text)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.sort_html)\n\t\t)\n\treturn processed_text_html", "def extract(content):\n parser = MyHTMLParser()\n parser.textBody = []\n parser.feed(content)\n textBody = parser.textBody\n textBody = \" \".join(textBody)\n textBody = textBody.replace('\\xa0', \" \")\n return textBody.strip()", "def parseSearchHtml(self):\n pass", "def parseSearchHtml(self):\n pass", "def extract_raw_text(soup, url):\n \n title_class = \"nom-notice\"\n title = soup.find(class_=title_class)\n raw_infos = {}\n raw_infos['name'] = title.contents[0].replace(u'\\xa0', ' ')\n \n notice = soup.find(class_=\"notice\")\n \n summary = notice.find(class_=\"chapo\")\n if summary is not None:\n first_para = summary.find_all('p', recursive=False)[-1]\n first_para.tag = 'div'\n first_para['class'] = 'summary'\n raw_infos['summary'] = unicode(first_para)\n \n else:\n raw_infos['summary'] = unicode('')\n\n article = notice.find(class_='texte')\n if article is not None:\n article['class'] = 'article'\n raw_infos['article'] = unicode(article)\n \n sources = notice.find(class_='sources')\n raw_infos['sources'] = unicode(sources)\n \n works = notice.find(class_='oeuvres')\n if works is not None:\n works['class'] = 'works'\n raw_infos['works'] = unicode(works)\n \n # In function that writes, encode everything to bytes! .encode('utf-8')\n return raw_infos", "def extract_page_text(html):\n soup = bs4.BeautifulSoup(html)\n\n # Remove <script/> and <style/> content\n for script in soup([\"script\", \"style\"]):\n script.extract()\n\n text = soup.get_text()\n\n # Strip leading and trailing whitespace from each line, then join all the\n # non-empty lines together.\n lines = (line.strip() for line in text.splitlines())\n text = '\\n'.join(line for line in lines if line)\n\n return text", "def raw_text(self):\n\t\t\n\t\t #eliminating more headers\n\t\traw_text = re.sub(r\".*OPERATIONS O[PF].*\",r\"\",self.doc)\n\t\traw_text = re.sub(r\"Page \\d+\",r\"\",raw_text)\n\t\traw_text = re.sub(r\".*B[lL]OCK.*\",r\"\",raw_text)\n\t\traw_text = re.sub(r\".*WEST GULF.*\",r\"\",raw_text)\n\t\traw_text = re.sub(r\".*NAVAL FORCES ON.*\",r\"\",raw_text)\n\t\traw_text = re.sub(r\"\\s\",r\" \", raw_text) #eliminating tabs etc. \t \t \n\t\treturn raw_text", "def _get_text(self, remove_newlines=True):\n if not self.text:\n url = self.url\n try:\n self.log.debug(\"Try to get content from page {}\".format(url))\n r = requests.get(url)\n except requests.exceptions.RequestException as e:\n self.log.warn(\"Unable to get page content of the url: {url}. 
\"\n \"The reason: {exc!r}\".format(url=url, exc=e))\n raise ParsingError(e.strerror)\n\n ud = UnicodeDammit(r.content, is_html=True)\n\n enc = ud.original_encoding.lower()\n declared_enc = ud.declared_html_encoding\n if declared_enc:\n declared_enc = declared_enc.lower()\n # possible misregocnition of an encoding\n if (declared_enc and enc != declared_enc):\n detect_dict = chardet.detect(r.content)\n det_conf = detect_dict[\"confidence\"]\n det_enc = detect_dict[\"encoding\"].lower()\n if enc == det_enc and det_conf < THRESHOLD_OF_CHARDETECT:\n enc = declared_enc\n # if page contains any characters that differ from the main\n # encoding we will ignore them\n content = r.content.decode(enc, \"ignore\").encode(enc)\n htmlparser = etree.HTMLParser(encoding=enc)\n root = etree.HTML(content, parser=htmlparser)\n etree.strip_elements(root, html.etree.Comment, \"script\", \"style\")\n text = html.tostring(root, method=\"text\", encoding=\"unicode\")\n\n if remove_newlines:\n self.log.debug(str(type(text)))\n text = re.sub('\\s+', ' ', text)\n self.text = text\n\n return self.text", "def extract_page_text(self, bs_object):\n\n # kill all script and style elements\n for script in bs_object([\"script\", \"style\", \"head\"]):\n script.extract() # rip it out\n\n # get text\n text = bs_object.get_text()\n\n # break into lines and remove leading and trailing space on each\n lines = (line.strip() for line in text.splitlines())\n # break multi-headlines into a line each\n chunks = (phrase.strip() for line in lines for phrase in line.split(\" \"))\n # drop blank lines\n text_list_gen = (chunk for chunk in chunks if chunk)\n text_list = list(text_list_gen)\n # print \"TEXT LIST >>>\\n\", text_list\n \n return text_list", "def parseHtmlText(text):\n # text processing\n raw = BeautifulSoup(text.text, 'html.parser').get_text()\n nltk.data.path.append('./nltk_data/') # set the path\n tokens = nltk.word_tokenize(raw)\n text = nltk.Text(tokens)\n # remove punctuation, count raw words\n nonPunct = re.compile('.*[A-Za-z].*')\n raw_words = [w for w in text if nonPunct.match(w)]\n raw_word_count = Counter(raw_words)\n # stop words\n no_stop_words = [w for w in raw_words if w.lower() not in stops]\n no_stop_words_count = Counter(no_stop_words)\n return raw_word_count, no_stop_words_count", "def extractText(postSoup):\n for tag in postSoup.findAll(True):\n if tag.name in (\"code\"):\n tag.extract()\n else:\n tag.hidden=True\n\n return postSoup.renderContents()", "def process_doc_html(self, doc_in):\n self.feed(doc_in) #SGMLParser call\n self.close() #SGMLParser call\n self.hand_off_temp_pieces('to_doc_pieces')\n self.all_pieces = self.all_pieces[:-16] # drop </body></html>\n return self.all_pieces", "def extractText(text):\n soup = BeautifulSoup(text, 'html.parser')\n for code in soup.find_all('code'):\n code.decompose()\n return soup.get_text()", "def _extract_html(self, url):\n self.response = requests.get(url, timeout=5)\n self.html = BeautifulSoup(self.response.content, \"lxml\") if self.response.ok else None\n # return self.html", "def extractText(html_code):\n html_tree = html.fromstring(html_code)\n chapter_list = html_tree.find_class(\"chapter\")\n chapter_text = chapter_list[0].text_content()\n return chapter_text", "def _html(self, text):\r\n html = URL_REGEX.sub(self._parse_urls, text)\r\n html = USERNAME_REGEX.sub(self._parse_users, html)\r\n html = LIST_REGEX.sub(self._parse_lists, html)\r\n return HASHTAG_REGEX.sub(self._parse_tags, html)", "def extract_text(soup, result):\n if soup:\n for t in 
soup.children:\n if type(t) == NavigableString:\n # Text content node\n result.append(t)\n elif isinstance(t, NavigableString):\n # Comment, CDATA or other text data: ignore\n pass\n elif t.name in whitespace_tags:\n # Tags that we interpret as whitespace, such as <br> and <img>\n result.append_whitespace()\n elif t.name in block_tags:\n # Nested block tag\n result.begin() # Begin block\n extract_text(t, result)\n result.end() # End block\n elif t.name not in exclude_tags:\n # Non-block tag\n extract_text(t, result)", "def HTMLparser(self):\n soup = self.getHTML()\n \n # Sort through all the text in the html:\n for text in soup.find_all('p'):\n try:\n paragraphNo = int(text.parent.p['id'][14:])\n \n # Only grab paragraphs in \"On the Social Contract\"\n if paragraphNo < self.START_PARAGRAPH or paragraphNo > self.END_PARAGRAPH:\n continue\n \n elif text.string:\n \n # Ignore those \"paragraphs\" in the html that simply outline different chapters/books\n if re.search('^(CHAPTER|BOOK)(.*):', text.string):\n continue\n \n else:\n \n # Want to read in the document by sentence (for RousseauBot to use individually later on)\n tempList = re.split('(?<!etc)\\.\\s(?!.*\\\")|\\!', text.string)\n for sentence in tempList:\n \n # When a \"paragraph\" is just a single sentence, re's .split() returns the sentence and a ''\n # Also, remove overly long quotes - Twitter has char limit\n if sentence != '' and len(sentence.strip()) < self.TWITTER_LIMIT:\n self.quotes.append(sentence.strip())\n \n except KeyError:\n \n # BS throws KeyError when <p>'s id field is blank; ignore - all paragraphs I need has an id\n continue", "def parsed_html():\n return utils.parse_html(\n \"\"\"\n <!doctype hmtl>\n <html>\n <head>\n <meta charset=\"utf-8\">\n <meta name=\"viewport\" content=\"width=device-width\">\n <title>Page title</title>\n <link rel=\"stylesheet\" href=\"/static/styles.css\" />\n </head>\n <body>\n <h1>Django Auto AMP</h1>\n <p>Generate automatic AMP from your Django templates</p>\n <img src=\"/static/img.jpg\" width=\"500\" height=\"300\" />\n <img src=\"/static/img.gif\" layout=\"nodisplay\" />\n <img src=\"/static/img.png\" />\n <script type=\"text/javascript\" src=\"/static/scripts.js\" />\n <script type=\"application/json\" src=\"/static/data.json\" />\n </body>\n </html>\n \"\"\"\n )", "def extract_text(html, guess_punct_space=True):\n sel = cleaned_selector(html)\n return selector_to_text(sel, guess_punct_space=guess_punct_space)", "def _html_text(self, html):\n ee = None\n try: return html.html_text()\n except Exception, e: ee = e; pass\n try: return html.xml_text()\n except Exception, e: print \"HtmlDocument/text\", ee, e; pass\n try: return str(html)\n except Exception, e: print \"HtmlDocument/text\", e; return \"&nbsp;\"", "def _strip_excerpt(self, raw_html):\n clean_regex = re.compile(\"<.*?>\")\n clean_text = re.sub(clean_regex, \"\", raw_html)\n return html.unescape(clean_text).replace(\"\\n\", \"\")", "def ExtractText(self, zhtmlstring):\n # Not defined in init due to Python2/Python3 complications.\n # pylint: disable=attribute-defined-outside-init\n self._text = []\n self.feed(zhtmlstring)\n self.close()\n self._text = [line.strip() for line in self._text]\n return ' '.join(self._text)", "def convert_content(self, html):\n\n try:\n dom = BeautifulSoup(html, 'html.parser')\n return self.parse_content(dom)\n except:\n return html", "def from_html(self, content):\r\n pass", "def parse_source(html, encoding='utf-8'):\n return BeautifulSoup(html, from_encoding=encoding)", "def 
html_to_text(html):\n s = TextExtractorHTMLParser()\n s.feed(html)\n return s.get_text()", "def extractContent(content):\n soup = BeautifulSoup(content, 'html.parser')\n return soup.get_text()", "def scrubHTML( html ):\n parser = StrippingParser()\n parser.feed( html )\n parser.close()\n return parser.result", "def remove_html( html):\n return html2txt(html)", "def htmlExtractPart(page, tag, attrs):\n try:\n htmlParsePage(page)\n except UnicodeEncodeError:\n logging.warn('could not parse html')\n return page['data']\n\n bs = page['parsedHtml']\n el = bs.find(tag, attrs=attrs)\n if el != None:\n logging.debug('Successfully stripped html')\n return str(el)\n else:\n logging.debug('Could not strip html')\n return page['data']\n return", "def html_text(self):\n return g.markdown_wiki.convert(self.data.text)", "def extract_answer_from_html(self, html):\n if html.strip().startswith('<'):\n soup = bs4.BeautifulSoup(html, 'html.parser')\n\n for p in soup.find_all('p'):\n if self.REPLY_RE.match(p.text):\n for el in list(p.previous_elements):\n if isinstance(el, bs4.element.Tag):\n el.decompose()\n p.decompose()\n break\n\n return str(soup)\n else:\n # plain text\n match = self.REPLY_RE.search(html)\n if match:\n return html[match.end(0):]\n\n return html", "def _extract_kiss_text(self, raw_slice):\n self.text = self.frame[raw_slice + 3:]", "def rawHTMLrendered(self):", "def extract(self, document):\n raise NotImplementedError('FeatureExtractorBase:extract(self, text) is not defined')", "def text_from_html(body):\n soup = BeautifulSoup(body, \"html.parser\")\n texts = soup.findAll(text=True)\n visible_texts = filter(tag_visible, texts)\n return \" \".join(t.strip() for t in visible_texts)", "def get_body_text(self):\n if self.body_type != 'HTML':\n return self.body\n\n try:\n soup = bs(self.body, 'html.parser')\n except RuntimeError:\n return self.body\n else:\n return soup.body.text", "def parse_article_html(page_resp):\n article_url = page_resp.url\n \n article_page_soup = bs4.BeautifulSoup(page_resp.text, \"lxml\")\n \n title_html = article_page_soup.find_all(\"h1\")[0]\n title_text = title_html.contents[0]\n \n date = article_page_soup.find_all(\"small\", {'class': 'gray'})[0]\n date_text = date.contents[4].replace(\" \", \"\").split(\"\\n\")[3][:10]\n \n article_content = article_page_soup.find_all(\"div\", {'class': 'rich_media_content'})[0]\n article_text = article_content.get_text('\\n')\n is_original = check_if_original(article_content) or '[原创]' in title_text\n \n return {\n 'title': title_text,\n 'date': date_text,\n 'url': article_url,\n 'is_original': is_original,\n 'text': article_text\n \n}", "def parsed_html_lean():\n return utils.parse_html(\n \"\"\"\n <!doctype hmtl>\n <html>\n <head>\n <title>Page title</title>\n </head>\n <body>\n <h1>Django Auto AMP</h1>\n <p>Generate automatic AMP from your Django templates</p>\n </body>\n </html>\n \"\"\"\n )", "def extract(self):\n\n self.decode(CMS_object.extract(self))\n return self.get_content()", "def _get_plain_text(self, url, soup, site):\n print('Get plaint text: ' + url)\n title = str(soup.find(class_=self._title_tags[site]))\n content = str(soup.find(class_=self._content_tags[site]))\n # h = html2text.HTML2Text() # uncomment this segment of code\n # h.ignore_links = True # if you want to get plain text\n # h.ignore_images = True\n # title = h.handle(title)\n # content = h.handle(content)\n if title == None or content == None:\n print('Different website structure: ' + url)\n return ''\n return self._clean(title + content, 
no_punc=True) # with symbols\n # return title + content # without symbols", "def _extract_data(self,data,tag=None,cssid=None,cssclass=None,attrs=None,regexp=None,index=0):\n \n# cssclass = \"song\"\n# cssid = \"newsTable0\"\n# tag = \"div\"\n# import pdb\n# pdb.set_trace() \n \n if cssid: \n searchconstrain = SoupStrainer(tag, id=cssid)\n elif cssclass:\n searchconstrain = SoupStrainer(tag, attrs={\"class\":cssclass}) \n else:\n if isinstance(attrs, unicode):\n try:\n attrs = attrs.encode('utf-8')\n regexp = regexp.encode('utf-8')\n except:\n pass \n searchconstrain = SoupStrainer(tag, attrs={attrs:re.compile(regexp)})\n\n soup = BeautifulSoup(data,parseOnlyThese=searchconstrain)\n rslist = [ tp for tp in soup ]\n return rslist[index]", "def extract_code_text(bs, index):\r\n texts = ''\r\n code = ''\r\n try:\r\n for a in bs.find_all('div', class_='post')[index].find_all('p'):\r\n texts += a.text\r\n for a in bs.find_all('div', class_='post')[index].find_all('code'):\r\n code += a.text\r\n except:\r\n pass\r\n return texts, code", "def __init__(self, html_contents):\n self.doc = html.document_fromstring(html_contents)", "def body(self, response):\t\n\t\tx = response.xpath(\"//div[@class='story-content row-fluid']/p/text()\").extract()\n\n\t\tfor i in range(0,len(x)):\n\t\t\tx[i] = x[i].strip(\"\\r\\n\\t\")\n\t\treturn x", "def get_html_part(parts):\n for part in parts:\n if part[\"mimeType\"] == \"text/html\":\n return part[\"body\"][\"data\"]\n return \"\"", "def extract_text(self, record):\n # type: (Element) -> str\n cdm_struc = Fields.cdm_structural_elements\n structure_el = record.find(cdm_struc['compound_object_container'])\n pages_el = structure_el.iterfind('.//' + cdm_struc['compound_object_page'])\n fulltext = ''\n for page in pages_el:\n page_el = page.find(cdm_struc['compound_object_page_text'])\n if page_el is not None:\n if page_el.text is not None:\n page_text = Utils.correct_text_encoding(page_el.text)\n fulltext += page_text\n return fulltext", "def _parse(self):\n soup = BS(self._current_html, 'lxml')\n for item in soup.select('div.c'):\n temp = {}\n # main content\n ctt = item.select('span.ctt')\n if not ctt:\n continue\n weibo_body = item.select('div')\n if len(weibo_body) > 1:\n temp['content'] = weibo_body[0].text\n btn_group = weibo_body[1].text\n else:\n temp['content'] = weibo_body[0].select('span.ctt')[0].text\n btn_group = weibo_body[0].text\n temp['is_repost'] = True if REPO_TEST_PATTERN.match(\n temp['content']) else False\n try:\n temp['like_num'] = LIKE_NUM_PATTERN.findall(btn_group)[0]\n temp['cmt_num'] = COMMENT_NUM_PATTERN.findall(btn_group)[0]\n temp['repo_num'] = REPO_NUM_PATTERN.findall(btn_group)[0]\n except Exception:\n pass\n cmt = item.select('.cmt')\n # visibility\n if cmt:\n try:\n temp['visibility'] = VISIBILITY_PATTERN.findall(\n cmt[0].text)[0]\n except Exception:\n pass\n\n # img in main content\n img = item.select('div a img')\n img_src = img[0].attrs['src'] if img else None\n temp['img_src'] = img_src\n LOGGER.debug('img_src: {}'.format(img_src))\n # time & source device\n ct = item.select('span.ct')\n if ct:\n ct = ct[0]\n text = ct.text\n reg_result = TIME_PATTERN.findall(text)[0]\n\n temp['time'] = ar(\n '{}年{}'.format(self._current_year, reg_result[0]),\n DATE_FMTS[0]\n ).naive if reg_result[0] else ar(\n reg_result[1], DATE_FMTS[1]\n ).naive\n temp['source'] = SOURCE_DEVICE_PATTERN.findall(text)[0]\n self._post_item = Post(**temp)\n self._attachment_item = Attachment(\n uri=img_src, post=self._post_item)\n self._store()", "def 
_get_raw_html(self):\n buffer = BytesIO()\n c = pycurl.Curl()\n c.setopt(c.URL, self.url)\n c.setopt(c.WRITEDATA, buffer)\n c.perform()\n c.close()\n return buffer.getvalue()", "def parsingconvtext(retrievedtext,customtextlist):\r\n if not retrievedtext: #in case empty text \r\n retrievedtext=changenonetostr(retrievedtext)\r\n newtext=BeautifulSoup(retrievedtext).get_text() \r\n #newtext=changenonetostr(retrievedtext)\r\n #newtext=BeautifulSoup(newtext).get_text() \r\n #remove http links\r\n newtext=re.sub(r'http\\S+', '', newtext)\r\n newtext=re.sub(r'\\r\\r\\r\\n', ' ', newtext)\r\n #remove LL specific text\r\n if customtextlist:\r\n for i in customtextlist:\r\n newtext=re.sub(i, '', newtext)\r\n return newtext", "def parse_html(html):\n cleanupstring = \"https://en.wikipedia.org/wiki/Wikipedia:Cleanup\"\n if cleanupstring in html:\n return None\n soup = BeautifulSoup(html, 'html.parser')\n soup = soup.contents[0]\n node = parse_to_quotes(soup.contents[0])\n quotes = []\n # quotes under these titles likely aren't actually by the individual\n blacklist = ['Disputed', 'Attributed',\n 'Misattributed', 'Quotes about',\n 'Quotations about',\n 'Quotations regarding', 'See also', 'References',\n 'Posthumous attributions', 'About', 'Criticism']\n # parse each section until reaching the External links section\n while not (node is None or (node.name == 'h2' and node.span.get_text() == \"External links\")):\n blacklisted = False\n for title in blacklist:\n if node.span.get_text().startswith(title):\n blacklisted = True\n if blacklisted:\n s = Section(node)\n node = s.end.next_sibling\n else:\n s = Section(node)\n s.propagate_source()\n quotes = quotes + s.collect_quotes()\n node = s.end.next_sibling\n return quotes", "def _npgStripExtra(self, htmlStr):\n lines = htmlStr.splitlines()\n start, end = (0, 0)\n for i, line in enumerate(lines):\n if '<article>' in line and start != 0:\n start = i\n if '</article>' in line and end != 0:\n end = i\n\n if start != 0 and end != 0 and end > start and end - start > 10 and end < len(lines):\n logging.log(5, 'stripping some extra html')\n return ''.join(lines[start:end + 1])\n else:\n return htmlStr", "def __yahoo_parse_text(self, content):\n text = ''\n # Process all paragraphs.\n paragraphs = content.find_all('p')\n for par in paragraphs:\n text += '<p>' + par.getText(separator=' ') + '</p>'\n # Remove all extra whitespace (single space remains).\n text = ' '.join(text.strip().split())\n # Result\n return text", "def parse_html(self):\n if self.file_extension == '.czm': # Caso de fichero comprimido czm.\n folder_path = extract_file(self.input_file) # Descomprime el archivo de entrada.\n self.html_path = find_extension(folder_path, '.html') # Busca el html en el directorio de extracción.\n else: # Caso de html proporcionado directamente.\n self.html_path.append(self.input_file)\n if not self.html_path: # En caso de que no exista ningún html.\n raise IOError('html file not found.')\n for path in self.html_path: # Almacena cada uno de los html parseados en un diccionario.\n html_file = open(path, encoding=\"utf8\") # Almacena los datos del html.\n parsed_html = BeautifulSoup(html_file, \"lxml\") # Hay que instalar lxml.\n self.parsed_html_dic.update({os.path.splitext(os.path.basename(path))[0]:parsed_html})", "def get_html(self):\r\n pass", "def get_text_only(self, soup):\n val = soup.string\n # see if we have a text element\n if val is None:\n conts = soup.contents\n resulttext = ''\n # not text so continue recursing through the tags\n for tag in conts:\n 
subtext = self.get_text_only(tag)\n resulttext += subtext + '\\n'\n return resulttext\n return val.strip()", "def strip_html(text):\n soup = BeautifulSoup(text, \"html.parser\")\n return soup.get_text()", "def scrape(self):\n\n self.url = self.headline.url\n\n # Should raise exception...\n if not self.parsing_template:\n return None, None, None, None, None\n\n try:\n response = self.download()\n self.source = response.text\n except:\n return None, None, None, None, None\n\n soup = BeautifulSoup(response.content, \"html.parser\")\n\n if soup:\n return self.parse(soup)\n else:\n return None, None, None, None, None", "def get_html(self, *args, **kwargs):\n return Text(self.get_data(*args, **kwargs), escape=False)", "def text_from_html(soup):\n\n texts = soup.findAll(text=True)\n visible_texts = filter(tag_visible, texts)\n\n return u\" \".join(t.strip() for t in visible_texts)", "def parse_text(self, page):\n text = page.find(self.tag_prefix + self.revision_tag).find(self.tag_prefix + self.text_tag).text\n title = page.find(self.tag_prefix + self.title_tag).text\n categories = []\n #\n text = self.parse_archivo(text)\n text = self.parse_foto(text)\n text = self.parse_by_line(text)\n text = self.parse_link(text)\n text = self.parse_url(text)\n text = self.parse_fecha(text)\n text = self.parse_bracketed_word(text)\n #\n if text:\n categories = re.findall(self.category_finder_regex, text)\n #\n text = self.parse_category(text)\n text = self.parse_other_language(text)\n text = self.parse_table_regex(text)\n text = self.parse_ver_fuente(text)\n text = self.remove_extra_text(text)\n text = self.remove_extra_characters(text)\n\n categorias = []\n for cat in categories:\n categorias.append(cat[6])\n\n if text:\n if 'REDIRECT' in text or 'redirect' in text:\n return None\n\n return Article(title=title, content=text, categories=categorias)", "def get_content(html_soup):\n text_above_image = html_soup.findAll('div', attrs = {\"class\" : \"rs-content abstract\"})\n if len(text_above_image) > 1:\n text_above_image = text_above_image[1].get_text()\n else:\n text_above_image = '' \n text_below_image = html_soup.find('div', attrs = {\"class\" : \"body\"}).get_text()\n content = text_above_image + text_below_image\n \n return content", "def parse_html(city, html):\n return city.parse_html(html)", "def parse(self, html=None):\n if html:\n self.html = html.encode('utf-8').decode('utf-8')\n\n # lets do the actual parsing\n self._parse()\n\n # Apply subclass specific behaviour after parsing has happened\n # This is needed because different parsers need to clean/modify\n # the parsed data uniquely.\n self.after_parsing()", "def strip_tags(self, html):\n s = MLStripper()\n s.feed(html)\n return s.get_data()", "def parse_text(self):\n self.text={}\n for i, lang in enumerate(LANGS):\n text=file(self.src).read()\n self.text[lang]=\"\"\n extracted, finish = \"\", 0\n start_string, stop_string = r\"<!--%s-->\" % lang, r\"<!--/%s-->\" % lang\n # Iterates to check multiple blocks of text within the file!\n # Pay attention to infinite loops!\n # AttributeError exception raised when no more blocks to extract exist\n while True:\n try:\n start=re.compile(start_string, re.IGNORECASE).search(text).span()[1]\n finish=re.compile(stop_string, re.IGNORECASE).search(text).span()[0]\n extracted+=text[start:finish]\n text=text[finish+1:]\n except AttributeError:\n break\n self.text[lang]+=extracted", "def get_content(response_text):\n soup = BeautifulSoup(response_text, 'html.parser')\n try:\n return soup.find('span', 
class_='total-entries').get_text()\n except AttributeError:\n return \"Failed to get results\"", "def get_wiki_content(self):\n url = \"https://fr.wikipedia.org/w/api.php?action=query&prop=extracts&exsentences=4&explaintext=&pageids={}&format=json\".format(self.page_id)\n self.page = str(self.page_id)\n self.response = requests.get(url)\n self.data = self.response.json()\n self.wiki_data = (self.data['query']['pages'][self.page]['extract'])\n return (self.wiki_data)", "def unhtml(cls, text):\n parser = cls()\n parser.feed(text)\n return parser", "def raw_text(self):\n return self._raw_text", "def parse_content(self, api):\n abstract = ''\n for tag in api.next_siblings:\n if tag.name == 'hr' or tag.name == 'blockquote':\n break\n elif tag.name == 'div':\n continue\n else:\n abstract+=(str(tag))\n return abstract.strip('\\n')", "def get_html2text(html):\n text_maker = html2text.HTML2Text()\n text_maker.body_width = 0\n return text_maker.handle(html)", "def _course_info_content(html_parsed):\n if len(html_parsed) == 1:\n # could enforce that update[0].tag == 'h2'\n content = html_parsed[0].tail\n else:\n content = html_parsed[0].tail if html_parsed[0].tail is not None else \"\"\n content += \"\\n\".join([html.tostring(ele).decode('utf-8') for ele in html_parsed[1:]])\n return content", "def strip_html(inputString):\r\n return BeautifulSoup(inputString, \"html.parser\").text", "def extract_text(url, sem):\n with (yield from sem):\n page = yield from get(url)\n\n tree = etree.HTML(page)\n paragraphs = tree.findall('.//*/div[@class=\"entry-content\"]/p')[1:-1]\n return url, b'\\n'.join(map(etree.tostring, paragraphs))", "def extractSearchResults(self, html):\n results = list()\n soup = BeautifulSoup(html, 'html.parser')\n div = soup.find('div', id='main')\n if (type(div) == types.NoneType):\n div = soup.find('div', id='center_col')\n if (type(div) == types.NoneType):\n div = soup.find('body')\n if (type(div) != types.NoneType):\n lis = div.findAll('a')\n if(len(lis) > 0):\n for link in lis:\n if (type(link) == types.NoneType):\n continue\n \n url = link['href']\n if url.find(\".google\") > 6:\n continue\n \n url = self.extractUrl(url)\n if(cmp(url, '') == 0):\n continue\n title = link.renderContents()\n title = re.sub(r'<.+?>', '', title)\n result = SearchResult()\n result.setURL(url)\n print '### URL: ' + url\n result.setTitle(title)\n span = link.find('div')\n if (type(span) != types.NoneType):\n content = span.renderContents()\n content = re.sub(r'<.+?>', '', content)\n result.setContent(content)\n results.append(result)\n return results", "def clean_html(self):\n self.cleaned_html = self.html.strip()\n for begin_splitter in self.begin_splitters:\n self.cleaned_html = self.cleaned_html.split(begin_splitter)[-1]\n for end_splitter in self.end_splitters:\n self.cleaned_html = self.cleaned_html.split(end_splitter)[0]\n self.cleaned_html = self.cleaned_html.strip()\n return self.cleaned_html", "def _extract_tags(html):\n tags = re.findall(r'<[^>]+>', html)\n\n return tags", "def _filter(self, text, context, encoding):\n\n content = []\n soup = bs4.BeautifulSoup(text, self.parser)\n soup = self.get_sub_node(soup)\n blocks, attributes, comments = self.to_text(soup)\n if self.comments:\n for c, desc in comments:\n content.append(filters.SourceText(c, context + ': ' + desc, encoding, self.type + 'comment'))\n if self.attributes:\n for a, desc in attributes:\n content.append(filters.SourceText(a, context + ': ' + desc, encoding, self.type + 'attribute'))\n for b, desc in blocks:\n 
content.append(filters.SourceText(b, context + ': ' + desc, encoding, self.type + 'content'))\n return content", "def get_gp_text_description(html):\n m = re.search('<div id=\"doc-description-container\"', html)\n desc_section_start = html[m.start():]\n m = re.search('</div>', desc_section_start)\n desc_section = desc_section_start[:m.start()]\n cleaned_desc = filter(lambda x: x in string.printable, desc_section)\n parser = HTMLParser()\n return parser.unescape(nltk.clean_html(cleaned_desc))", "def dissect(self, text):", "def extract(self, data):", "def __init__(self, html_soup):\n # Drilling down to the internal wrapper <div> tag\n self.data = html_soup.find('div', class_='sbkBrv_SingleResultDesc')", "def fetch_doc_text_body(self, document_level, find_query_mixin={}):\n find_query = {'subreddit': self.subreddit, 'postwise.text':{'$exists':True}}\n find_query.update(find_query_mixin)\n\n if document_level != 'postwise':\n raise NotImplementedError('document_level:%s' % document_level)\n\n print 'found %i matching the query for text body docs' % self.posts_read.find(find_query).count()\n\n for doc in self.posts_read.find(find_query):\n yield doc['_id'], doc[document_level]['text']", "def extract_one( html: str, fpath: Path ):\n # %%\n doc = BeautifulSoup( html, features='html.parser')\n\n ret = { 'linkedin_handle': fpath.name.split('.')[0] }\n _parse_top_card( ret, doc )\n # %%\n ret['about'] = _extract_about( doc )\n # if len(ret['about']) < 100 and ret['about'].find('ver más') > 0:\n # print( f\"\\nVer más detected: \\nabout:{ret['about']} fpath={fpath}\" )\n\n ret['about_stats'] = {'about_eng_ratio': _common_english_ratio(ret['about'])}\n # %%\n ret['work_experience'] = _parse_experiences( doc )\n ret['work_stats'] = calc_work_stats( ret['work_experience'])\n # %%\n ret['skills'] = proc_skills_section( doc )\n ret['education'] = _parse_education( doc )\n ret['education_stats'] = _education_stats( ret['education'])\n ret['accomplishments'] = _extract_accomplishments(doc)\n ret['profile_text_stats'] = profile_text_stats( doc )\n # %%\n return ret\n # %%", "def parse_announcement_data(self) -> 'Scraper':\n logger.info('Parsing extracted html partial')\n for tag in self.html_partial: # there are 63 tags\n if tag.name == 'h4':\n announcement_data = self.get_data_from_tag(tag)\n self.announcement_data_list.append(announcement_data)\n logger.info('Compiled announcement data list from html web page partial')\n return self", "def tokenize_html(self, path):\n with open(path, errors=u'ignore') as f:\n soup = BeautifulSoup(f, u'lxml')\n if soup.title:\n self.title = soup.title.text\n junk = [u'head', u'script', u'style']\n for e in soup(junk):\n e.decompose()\n text = soup.get_text(separator=u' ')\n self.tokenize(text)\n if self._config[u'index_urls']:\n self.tokenize_href(soup)", "def plain_text_(self):\n return self.content.decode(self.encoding)", "def get_article_text(self, article_webpage):\n lemonde_parser = LeMondeHTMLParser()\n lemonde_parser.feed(article_webpage)\n return lemonde_parser.article_data", "def parse(html, encoding='utf-8'):\n if isinstance(html, unicode):\n return bs4.BeautifulSoup(html, 'html.parser')\n\n return bs4.BeautifulSoup(html, 'html.parser', from_encoding=encoding)", "def parse(self, text, html=True):\r\n self._urls = []\r\n self._users = []\r\n self._lists = []\r\n self._tags = []\r\n\r\n reply = REPLY_REGEX.match(text)\r\n reply = reply.groups(0)[0] if reply is not None else None\r\n\r\n parsed_html = self._html(text) if html else self._text(text)\r\n return 
ParseResult(self._urls, self._users, reply,\r\n self._lists, self._tags, parsed_html)", "def _parse_text(self, element):\n try:\n return element.text\n except AttributeError:\n return None", "def get_txt(url):\n soup = make_soup(url)\n\n # remove scripts and styling\n for script in soup([\"script\", \"style\"]):\n script.extract() # rip it out\n text = soup.get_text()\n\n # clean data\n lines = (line.strip() for line in text.splitlines())\n chunks = (phrase.strip() for line in lines for phrase in line.split(\" \"))\n text = '\\n'.join(chunk for chunk in chunks if chunk)\n\n return text", "def summary(self, html_partial=False):\n try:\n ruthless = True\n while 1:\n self._build_doc(True)\n #pangwei add on 2014/12/08 begin\n for elem in self.tags(self._root, 'footer', 'select'):\n elem.drop_tree()\n #pangwei add on 2014/12/08 end\n for elem in self.tags(self._root, 'script', 'style'):\n elem.drop_tree()\n for elem in self.tags(self._root, 'body'):\n elem.set('id', 'readabilityBody')\n if ruthless:\n self.remove_unlikely_candidates()\n self.transform_misused_divs_into_paragraphs()\n \n candidates = self.score_paragraphs()\n best_candidate = self.select_best_candidate(candidates)\n if best_candidate:\n article = self.get_article(candidates, best_candidate,html_partial=html_partial)\n else:\n if ruthless:\n ruthless = False\n continue\n else:\n article = self._root.find('body')\n if article is None:\n article = self._root\n \n cleaned_article = self.sanitize(article, candidates)\n article_length = len(cleaned_article or '')\n retry_length = self.kwargs.get('retry_length',self.RETRY_LENGTH)\n of_acceptable_length = article_length >= retry_length\n if ruthless and not of_acceptable_length:\n ruthless = False\n continue\n else:\n return cleaned_article\n \n except StandardError, e:\n raise Unparseable(str(e)), None, sys.exc_info()[2]", "def from_html(self, html):\n\n # Try 1: Search popular author tags for authors\n\n matches = []\n _authors = []\n doc = string_to_doc(html)\n\n for attr in self.ATTRS:\n for val in self.VALS:\n found = doc.xpath('//*[@%s=\"%s\"]' % (attr, val))\n matches.extend(found)\n\n for match in matches:\n content = u''\n\n if match.tag == 'meta':\n mm = match.xpath('@content')\n if len(mm) > 0:\n content = mm[0]\n\n else: # match.tag == <any other tag>\n content = match.text or u'' # text_content()\n\n if len(content) > 0:\n _authors.extend(self.from_string(content))\n\n return format_authors(_authors)", "def htmlParsePage(page):\n if 'parsedHtml' not in page:\n logging.debug('Parsing HTML')\n html = page['data']\n html = html.replace(' xmlns=\"http://www.w3.org/1999/xhtml\"', '')\n html = removeThreeByteUtf(html)\n page['parsedHtml'] = BeautifulSoup(html)" ]
[ "0.6755422", "0.6671859", "0.6553396", "0.64243054", "0.6272383", "0.623531", "0.623531", "0.6228035", "0.61742455", "0.61664754", "0.61556983", "0.6104006", "0.6092044", "0.60168433", "0.59852856", "0.59402233", "0.5923603", "0.59181535", "0.59116304", "0.58720744", "0.5800878", "0.5799446", "0.57864213", "0.5786221", "0.5746921", "0.5743207", "0.5730884", "0.5718094", "0.57174754", "0.5703741", "0.5657854", "0.5625382", "0.5613903", "0.5595179", "0.5587094", "0.5579994", "0.5579841", "0.55772185", "0.55074877", "0.5484987", "0.5475086", "0.5470002", "0.5454843", "0.5443663", "0.5437496", "0.54365087", "0.5423892", "0.54178953", "0.54114705", "0.537941", "0.5371827", "0.53698283", "0.53655905", "0.53538924", "0.53526753", "0.53438026", "0.53354335", "0.5307913", "0.53078467", "0.5301645", "0.52909505", "0.52894753", "0.5281215", "0.52806336", "0.52755463", "0.5268327", "0.52654904", "0.5259136", "0.5254485", "0.5250792", "0.524843", "0.52378803", "0.5234496", "0.5233922", "0.52326936", "0.5231213", "0.5217178", "0.52148736", "0.52129036", "0.52086675", "0.52063626", "0.5204506", "0.5203306", "0.5192229", "0.5191089", "0.51910233", "0.51669675", "0.5163411", "0.51590663", "0.5149865", "0.51407355", "0.51364625", "0.512467", "0.51246023", "0.5121455", "0.51175874", "0.51161003", "0.5105009", "0.5100987", "0.5095287" ]
document_score: 0.73629075
document_rank: 0
We override the normal solve() so that we do not have to enter all 343,000 state_targets for this class.
def solve(self, print_steps=False) -> bool:
    (state, _cost_to_goal) = self.ida_heuristic()
    steps = self.steps(state)
    if steps:
        for step in steps:
            self.parent.rotate(step)
            if print_steps:
                logger.info(f"{self}: step {step}")
        return True
    else:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve(self, state, times):", "def solve(self, current_state: dict) -> dict:", "def solution(self) -> State:", "def solve(self):\n pass", "def solve(self):\n pass", "def solve(self):\n ...", "def solve(self):", "def solve(self):\n \n raise NotImplementedError(\"not implemented!\")", "def solve(self):\n raise NotImplementedError(\"This method needs to be implemented.\")", "def solve(self):\n for step in self.run.values():\n step.solve()", "def solve(self, **kwargs) -> OptimizationResult:\n raise NotImplementedError", "def solve(self, solver):\n solver.solve()", "def solve(self, **kwargs):\n return self.system.solve(**kwargs)", "def solve(self, problem, find_all_solutions=False):\n # actions and total_cost will be set if there is a path,\n # otherwise, they will still be None\n self.actions = None\n self.total_cost = None\n self.num_states_explored = 0 # for comparison against UCS\n\n start_state = problem.start_state()\n frontier = [([], start_state),]\n visited = set([])\n actions_tried = []\n\n while len(frontier) > 0:\n prev_actions, state = frontier.pop()\n actions_tried.append(prev_actions)\n visited.add(state)\n self.num_states_explored += 1\n if self.num_states_explored % 250 == 0:\n print(\"explored\", self.num_states_explored, \"states\")\n\n # print(\"---\")\n # print(\"state: (prev actions were: %s)\" % (prev_actions, ))\n # print(\" \", state.graph)\n # print(\" \", state.node_colors)\n # print(\" moves left: %d\" % (state.moves_left,))\n if problem.is_terminal_state(state) == 1:\n print(\"solution found! \", prev_actions)\n print(\"num states explored: \", self.num_states_explored)\n problem.actions = self.actions\n problem.num_states_explored = self.num_states_explored\n # print(\"explored these actions:\")\n # for action_tried in actions_tried:\n # print(action_tried)\n\n if find_all_solutions: continue\n else: break\n if problem.is_terminal_state(state) == -1:\n continue\n for (action, next_state, cost) in problem.actions_and_costs(state):\n if (next_state not in visited and\n problem.is_terminal_state(next_state) != -1):\n actions = copy.deepcopy(prev_actions)\n actions.append(action)\n frontier.append((actions, next_state))", "def actualSolve(self, lp):\n\t\traise NotImplementedError", "def solve_step(self,puzzle_grid,x,y):\n self.puzzleGrid = puzzle_grid\n if(self.foundStep == False):\n self.targetCell = self.puzzleGrid.grid[x][y]\n if(self.targetCell.isSolved == False):\n self.calculate_possibilities()\n if len(self.targetCell.possibilities) == 1: #README method 1\n self.targetCell.solve()\n return True\n else:\n return self.check_neighbours() #README method 2", "def pfd_solve (r, w) :\n\tglobal targets\n\ta = [0, 0]\n\tpfd_initialize(r, a)\n\ttargets_array = []\n\tpfd_find_first_target()\n\t\n\tresultArr = []\n\n\twhile len(targets) > 0:\n\t\ttarget = heapq.heappop(targets)\n\t\tresultArr.append(target+1)\n\t\tnew_targets = pfd_clear(target)\n\n\t\tfor i in new_targets:\n\t\t\tdependencies_list[i]-=1\n\t\t\tif dependencies_list[i] == 0:\n\t\t\t\theapq.heappush(targets,i)\n\t\t\t\t\n\t#Prints the result\n\tfor i in xrange(len(resultArr)) :\n\t print resultArr[i],", "def solve(self):\n new_puzzle = self._puzzle.clone()\n self._solution = new_puzzle.solve_puzzle()\n del new_puzzle\n pass", "def main():\n A = np.array([\n [40, 36],\n [36, 45]\n ])\n b = np.array([-64, -66])\n c = 27\n solve(Task1TargetFunction(A, b, c))", "def _map_state_vars_and_eqs(self):\n\n def get_used_eqs_and_state_vars(eq_to_expand, equations):\n \"\"\" Returns used equations and state vars for 
a given equation\n\n :param eq_to_expand: list containing equations to recurse over and expand definitions for\n note: expecting equations in [(lhs, rhs)] form.\n :param equations: set of equations to look for definitions in.\n :return: set of equations and set of used state vars.\n \"\"\"\n used_state_vars = set()\n for eq in eq_to_expand:\n for v in eq[1].atoms(Derivative) | eq[1].free_symbols:\n if v in self._model.state_vars:\n used_state_vars.add(v)\n elif v not in [e[0] for e in eq_to_expand]:\n eq_to_expand.extend(filter(lambda e: e[0] == v, equations))\n return set(eq_to_expand), used_state_vars\n\n for i, deriv in enumerate(self._model.y_derivatives):\n equations, used_state_vars = \\\n get_used_eqs_and_state_vars([(d.lhs, d.rhs) for d in self._derivative_equations if d.lhs == deriv],\n set(map(lambda e: (e.lhs, e.rhs), self._derivative_equations)))\n\n # get all the variables used in jacobian matrix entry and all variables used to define them\n used_jacobian_vars, used_jacobian_state_vars = \\\n get_used_eqs_and_state_vars([(None, self._jacobian_matrix[i, i])], set(self._jacobian_equations))\n\n for sv in self._formatted_state_vars:\n sv.setdefault('in_evaluate_y_derivative', []).append(sv['sympy_var'] in used_state_vars)\n sv.setdefault('in_evaluate_partial_derivative', []).append(sv['sympy_var'] in used_jacobian_state_vars)\n\n for eq in self._vars_for_template['y_derivative_equations']:\n self.eq_in_evaluate_y_derivative(eq, equations)\n\n for je in self._vars_for_template['jacobian_equations']:\n self.eq_in_evaluate_partial_derivative(je, used_jacobian_vars)", "def __solve(self) -> None:\n pyo.TransformationFactory(\"contrib.detect_fixed_vars\").apply_to(self.model) # type: ignore\n pyo.TransformationFactory(\"contrib.deactivate_trivial_constraints\").apply_to(self.model) # type: ignore\n\n # initialise the solver object\n self._logger.debug(\"[ModelSolver] Solver object initiated...\")\n solver = Config.OPTIMISATION_MODEL_CONFIG['SOLVER_TYPE']\n opt = pyo.SolverFactory(solver)\n if Config.OPTIMISATION_MODEL_CONFIG['SOLVER_OPTION'].get(solver) is not None:\n for k, v in Config.OPTIMISATION_MODEL_CONFIG['SOLVER_OPTION'].get(solver).items():\n opt.options[k] = v\n\n try:\n start_time = datetime.now()\n self._logger.debug(\"[ModelSolver] Solver starting...\")\n results = opt.solve(self.model, tee=True)\n self.results = results\n end_time = datetime.now()\n self._logger.info(f\"[ModelSolver] Solver completed in {end_time - start_time}.\")\n except Exception as e:\n raise Exception(f\"Model optimisation failed with {solver} with error message {e}.\")\n\n if (results.solver.status == SolverStatus.ok) and (results.solver.termination_condition == TerminationCondition.optimal):\n self._logger.info(\"Solution is feasible and optimal\")\n results.write()\n elif results.solver.termination_condition == TerminationCondition.infeasible:\n raise ValueError(\"Model optimisation resulted into an infeasible solution\")\n\n self.model.optimised = True", "def solve(game, state_dim, num_controls, params, save_path, seed=None, verbose=True):\n if seed is not None:\n tf.set_random_seed(seed=seed)\n np.random.seed(seed=seed)\n agent = _Agent(state_dim, num_controls, params, verbose)\n state = game.reset()\n agent.reset()\n while agent.continues():\n controls = agent.explore(state)\n next_state, reward, done, _ = game.step(controls)\n done = agent.step(state, controls, reward, next_state, done)\n state = next_state\n if done:\n agent.show_progress(game)\n state = game.reset()\n agent.reset()\n 
agent.training_step()\n agent.save(save_path)", "def solve(self):\n self.enforce_node_consistency()\n self.ac3()\n return self.backtrack(dict())", "def solve(self):\n self.enforce_node_consistency()\n self.ac3()\n return self.backtrack(dict())", "def solve(self):\n self.enforce_node_consistency()\n self.ac3()\n return self.backtrack(dict())", "def setConstQuantityAndBetaEqState(self, pointDict, quantity, target):\n print \"setConstQuantityAndBetaEqState: \", pointDict\n assert 'ye' not in pointDict, \"You can't SPECIFY a Ye if you're \" \\\n \"setting neutrinoless beta equlibrium!\"\n self.validatePointDict(pointDict)\n assert len(pointDict) < 2, \"State overdetermined for more than 1 indVars!\"\n #todo: check quantity is valid 3D table\n\n #defines 1D root solver to use in routine\n solveRoot = scipyOptimize.brentq # solveRootBisect\n\n solveVarName = 'logtemp'\n currentSolveVar = 0.0\n currentYe = 0.25\n #previous variables used to measure convergence of solve\n # so set them to something significantly different than starting values\n previousSolveVar = 100.0\n previousYe = 100.0\n yeError = relativeError(currentYe, previousYe)\n solveVarError = relativeError(currentSolveVar, previousSolveVar)\n otherVarName = pointDict.keys()[0]\n otherVar = pointDict.values()[0]\n\n maxIters = 5\n tol = 1e-3\n\n iteration = 0\n while iteration < maxIters and yeError + solveVarError > tol/2.0:\n previousSolveVar = currentSolveVar\n previousYe = currentYe\n getSolveVar = lambda x: multidimInterp((currentYe, x, otherVar),\n [self.h5file['ye'][:],\n self.h5file[solveVarName],\n self.h5file[otherVarName]],\n self.h5file[quantity][...],\n linInterp, 2) - target\n try:\n currentSolveVar = solveRoot(getSolveVar,\n self.h5file[solveVarName][0],\n self.h5file[solveVarName][-1],\n (),tol)\n except ValueError as err:\n print \"Root for log10(T) not bracketed on entire table: \" \\\n + str(err)\n # see if lower or upper temperature bound best\n logtemp = self.h5file['logtemp']\n answer1 = multidimInterp((currentYe, logtemp[0], otherVar),\n [self.h5file['ye'][:],\n self.h5file['logtemp'],\n self.h5file['logrho']],\n self.h5file[quantity][...],\n linInterp, 2) - target\n answer2 = multidimInterp((currentYe, logtemp[-1], otherVar),\n [self.h5file['ye'][:],\n self.h5file['logtemp'],\n self.h5file['logrho']],\n self.h5file[quantity][...],\n linInterp, 2) - target\n\n if (abs(answer1) < abs(answer2)):\n currentSolveVar = self.h5file['logtemp'][0]\n print \"Recovering with lowest table value, answer: %s\" % currentSolveVar\n else:\n currentSolveVar = self.h5file['logtemp'][-1]\n print \"Recovering with highest value, answer: %s\" % currentSolveVar\n\n getYe = lambda x : multidimInterp((x, currentSolveVar, otherVar),\n [self.h5file['ye'][:],\n self.h5file[solveVarName],\n self.h5file[otherVarName]],\n self.h5file['munu'][...],\n linInterp, 2)\n #check for bracketing error in root solve for ye\n try:\n currentYe = solveRoot(getYe,\n self.h5file['ye'][0],\n self.h5file['ye'][-1], (), tol)\n except ValueError as err:\n print \"Error in scipy root solver solving for ye: \", str(err)\n currentYe = self.findYeOfMinAbsMunu((currentSolveVar, otherVar))\n print \"Recovering with findYeOfMinAbsMunu, answer: %s\" % currentYe\n #print \"currentYe: \", currentYe, \"\\tcurrentT: \", currentSolveVar\n\n yeError = relativeError(currentYe, previousYe)\n solveVarError = relativeError(currentSolveVar, previousSolveVar)\n iteration += 1\n #print \"errs: \", yeError, solveVarError\n\n newDict = pointDict.copy()\n newDict['ye'] = currentYe\n 
temp = numpy.power(10.0,currentSolveVar) # TODO TEMP HARD CODE\n newDict['temp'] = temp\n self.setState(newDict)\n return currentYe, temp # TODO TEMP HARD CODE", "def solve(self):\n if not self.running or self.state == \"stopping\":\n return False\n\n # Find first empty tile\n target = ()\n for i in range(9**2):\n if self.board[i // 9, i % 9] == 0:\n target = (i // 9, i % 9)\n break\n\n # If there are no empty tiles, the puzzle is solved\n if not target:\n return True\n\n # Tests all possible values\n for value in range(1, 10):\n if not self.isPossibleAssign(target, value):\n continue\n\n self.update_board(target, value)\n\n if self.solve():\n return True\n\n # In case of failure, reset and return False\n self.update_board(target, 0)\n\n return False", "def solve(self) -> Dict:\n solution = self.opt.decision_variables.vec2dict(self._solve())\n\n if self._error_on_fail and (not self.did_solve()):\n raise RuntimeError(\"Solver failed!\")\n\n # Add full model state to the solution dictionary\n for model in self.opt.models:\n for d in model.time_derivs:\n n_s = model.state_name(d)\n n_s_x = model.state_optimized_name(d)\n if isinstance(model, RobotModel):\n if model.num_param_joints > 0:\n n_s_p = model.state_parameter_name(d)\n t = solution[n_s_x].shape[1]\n solution[n_s] = cs.DM.zeros(model.dim, t)\n solution[n_s][model.optimized_joint_indexes, :] = solution[\n n_s_x\n ]\n solution[n_s][model.parameter_joint_indexes, :] = self._p_dict[\n n_s_p\n ]\n else:\n solution[n_s] = solution[n_s_x]\n else:\n solution[n_s] = solution[n_s_x]\n\n return solution", "def solve(self, X,missing_mask):\n raise ValueError(\"%s.solve not yet implemented!\" % (\n self.__class__.__name__,))", "def solve(self, b):\n raise NotImplementedError", "def solve(self, tf_days=DAYS, numpoints=NUMPOINTS):\n tspan = np.linspace(0, tf_days, numpoints)\n sol = call_solver(self.__class__.FUNC, self.p, self.w0, tspan)\n # Multiply by the population\n sol[:, 1:] *= self.pop\n\n self.sol = sol\n return self", "def _double_q_state_value_estimate(self, state, nonterminal_mask, non_final_states, feasible_mask):\n next_state_values = to_variable(to_cuda(torch.zeros(state[0].size(0)).float(), self.gpu_device))\n nonterminal_feasible_mask = feasible_mask[nonterminal_mask.nonzero().view(-1)]\n predictions_dq = self.model(to_cuda(non_final_states, self.gpu_device))\n # modifying predictions by adjusted to ensure max value is within feasible action set\n adjuster = 2 * max(abs(predictions_dq.min().data[0]), predictions_dq.max().data[0])\n adjusted_predictions_dq = predictions_dq - adjuster\n adjusted_predictions_dq[nonterminal_feasible_mask] += adjuster\n max_vals_dq, max_inds_dq = adjusted_predictions_dq.max(1)\n predictions = self.old_model(to_cuda(non_final_states, self.gpu_device))\n next_state_values[nonterminal_mask] = predictions.gather(1, max_inds_dq.view(-1, 1))\n next_state_values.volatile = False\n return next_state_values", "def solve_nonlinear(self, params, unknowns, resids):\n pass", "def solve_nonlinear(self, params, unknowns, resids):\n pass", "def solve(self):\n print(\"Attempting to solve problem instance with {} constraints\".format(len(self.constraints)))\n self.formulation.solve(solver='SCS')\n print(self.formulation.status)", "def solve(self):\n print(\"Attempting to solve problem instance with {} constraints\".format(len(self.constraints)))\n self.formulation.solve(solver='SCS')\n print(self.formulation.status)", "def solve(self):\n print(\"Attempting to solve problem instance with {} 
constraints\".format(len(self.constraints)))\n self.formulation.solve(solver='SCS')\n print(self.formulation.status)", "def change(a, b, state, target):\n # a = random.randrange(1, 199, 2)\n # b = random.randrange(1, 199, 2)\n new_state = ''\n for i in range(len(state)):\n if i == a:\n new_state += b\n continue\n new_state += state[i]\n # print(new_state)\n res = solving(int(new_state[0]), int(new_state[2]), new_state[1])\n for i in range(2, len(new_state) - 2, 2):\n res = solving(res, int(new_state[i + 2]), new_state[i + 1])\n # print(\"Distance from target: \", target - res)\n return new_state, abs(target - res)", "def __init__(self, sparse_args=None, solve=True):\n self.solved = False\n self.sparse_args = sparse_args\n self.solved = False\n if solve: self.solve()", "def test_solve_task(self):\n pass", "def solve(instructions: Iterator[Instruction], state: StateProtocol) -> int:\n for instruction in instructions:\n state.apply(instruction)\n\n return state.manhatam_distance", "def _q_state_value_estimate(self, state, nonterminal_mask, non_final_states, feasible_mask):\n next_state_values = to_variable(to_cuda(torch.zeros(state[0].size(0)).float(), self.gpu_device))\n nonterminal_feasible_mask = feasible_mask[nonterminal_mask.nonzero().view(-1)]\n predictions = self.old_model(to_cuda(non_final_states, self.gpu_device))\n # modifying predictions by adjusted to ensure max value is within feasible action set\n adjuster = 2 * max(abs(predictions.min().data[0]), predictions.max().data[0])\n adjusted_predictions = predictions - adjuster\n adjusted_predictions[nonterminal_feasible_mask] += adjuster\n next_state_values[nonterminal_mask] = adjusted_predictions.max(1)[0]\n next_state_values.volatile = False\n return next_state_values", "def get_sol(self):", "def solve(self, use_cache=True):\n if self.parallel:\n self.solve_all_parallel(use_cache)\n else:\n self.solve_all(use_cache)", "def solveOneStep(self):\n ### Student code goes here\n # Mark this move as explored\n self.visited[self.currentState] = True\n self.visited_states.append(self.currentState.state)\n\n # Get move to make\n movables = self.gm.getMovables()\n # print(\"EXPLORING GAME STATE \" + str(self.gm.getGameState()) + \"---------------------------------------------------------\")\n to_move = self.currentState.nextChildToVisit # movables index\n # print(\"depth \", self.currentState.depth)\n\n # Return if done\n if self.currentState.state == self.victoryCondition:\n # print(\"DONE\")\n return True\n\n # If current state has no children, make children\n if not self.currentState.children:\n for movable_statement in movables:\n # Make the move\n # print(\"implementing move \", movable_statement)\n self.gm.makeMove(movable_statement)\n\n # Create a new state with this move made\n new_state = self.gm.getGameState()\n # print (\"new state \", new_state)\n\n # If the new state hasn't been visited and isn't in the queue then add it as a child and to the queue\n if (new_state not in self.visited_states):\n new_gs = GameState(new_state, self.currentState.depth + 1, movable_statement)\n new_gs.parent = self.currentState\n self.currentState.children.append(new_gs)\n self.currentState.nextChildToVisit = to_move + 1\n self.visited[new_gs] = True\n self.visited_states.append(new_state)\n self.gs_queue.append(new_gs)\n\n self.gm.reverseMove(movable_statement)\n\n # Return false if no more to explore\n if not self.gs_queue:\n return False\n\n # Revert to state at when current and next start to change\n root_curr = self.currentState\n 
self.currentState = self.gs_queue.popleft()\n root_new = self.currentState\n\n # Backtrack to when current node and new node start to diverge\n if root_new.depth == root_curr.depth:\n while root_curr.state != root_new.state:\n self.gm.reverseMove(root_curr.requiredMovable)\n root_curr = root_curr.parent\n root_new = root_new.parent\n else:\n while root_curr.requiredMovable:\n self.gm.reverseMove(root_curr.requiredMovable)\n root_curr = root_curr.parent\n\n # Return game master to state that we are exploring\n # Find path between root and current state\n path = []\n currNode = self.currentState\n while currNode != root_curr:\n path.append(currNode.requiredMovable)\n currNode = currNode.parent\n\n # Created backwards path, now make moves from root to current state\n path.reverse()\n for movable_statement in path:\n self.gm.makeMove(movable_statement)\n\n return False", "def solve(self) -> jnp.ndarray:\n pass", "def solve(self,**kwargs):\n if kwargs.pop('restart',False):\n self.nopt = 0\n savefile = kwargs.pop('savebase',os.path.abspath(self.filename)+('_%02d.cysolve.pkl' % self.nloop))\n\n if kwargs.has_key('savedir'):\n savedir = kwargs['savedir']\n for isub in range(self.nspec):\n kwargs['isub'] = isub\n self.loop(**kwargs)\n print \"Saving after nopt:\", self.nopt\n self.saveState(savefile)\n \n self.pp_ref = self.pp_int\n self.nloop += 1", "def algorithm(self, min_state, state, max_state, agent_simulator_object_list, agent_simulator_name_list,\n initial_input_dict, time_step, dependencies, state_history):\n\n input_dict_with_extrapolation = copy.deepcopy(initial_input_dict)\n non_extrapolated_input_dict = copy.deepcopy(initial_input_dict)\n\n states = {}\n # store current state for every simulator\n for simulator_name in agent_simulator_name_list:\n states[simulator_name] = state\n\n # increase state in order to compute the next state\n output_state = state + time_step\n while all(min_state <= state < max_state for state in states.values()):\n # extrapolate all model's input data\n for agent_name in agent_simulator_name_list:\n previous_output_data_list = []\n for state_number, time_step_dict in state_history.items():\n previous_output_data_list.append(time_step_dict[agent_name]['output data'])\n input_dict_with_extrapolation[agent_name]['output data'] = \\\n self.extrapolate(previous_output_data_list)\n\n #  run the models with the input data\n for agent_simulator, agent_simulator_name in zip(agent_simulator_object_list, agent_simulator_name_list):\n # check on which inputs from other models the current model depends on\n curr_simulator_input = []\n for dependency in dependencies[agent_simulator_name]:\n # gather the input data\n new_data = input_dict_with_extrapolation[dependency]['output data']\n curr_simulator_input.append(new_data)\n\n # define current state and input for current model\n current_simulators_state = states[agent_simulator_name]\n new_input = {\"state\": current_simulators_state, \"output data\": curr_simulator_input}\n\n # execute current simulator with output from its dependent on simulators\n simulator_output = self.execute_simulator_with_output_from_other_simulator(\n agent_simulator, new_input, agent_simulator_name)\n\n # add newly computed output to stored inputs\n non_extrapolated_input_dict[agent_simulator_name] = simulator_output\n\n # increase simulators state\n try:\n states[agent_simulator_name] += time_step\n except IndexError:\n print(colored(\"\\n-----warning: state could not be increased------\\n\", \"red\"))\n\n # update state history with new data\n 
new_history_state = copy.deepcopy(non_extrapolated_input_dict)\n state_history[output_state] = new_history_state\n\n #  increase state and move to next time step\n output_state += time_step\n\n return states, state_history", "def solve(self):\n return self.solve_single_iteration(BruteForceRowSolver2.solve_brute_force_save_intermediate)", "def solve(game,\n state_dim, num_actions,\n params,\n save_path,\n seed=None,\n verbose=True):\n if seed is not None:\n tf.set_random_seed(seed=seed)\n np.random.seed(seed=seed)\n agent = _Agent(state_dim, num_actions, params, verbose)\n state = game.reset()\n agent.reset()\n while agent.continues():\n action = agent.explore(state)\n next_state, reward, done, info = game.step(action)\n done = agent.step(state, action, reward, next_state, done)\n state = next_state\n if done:\n agent.show_progress(game)\n state = game.reset()\n agent.reset()\n agent.training_step()\n agent.save(save_path)", "def did_solve(self) -> bool:\n pass", "def _solve(self, mu=None):\n pass", "def initial_state(numbers, operators, target):\n x = numbers\n opt = random.choice(operators)\n random.shuffle(x)\n state = str(x[0]) + opt + str(x[1])\n res = solving(x[0], x[1], opt)\n for i in range(2, len(x)):\n op = random.choice(operators)\n state += op + str(x[i])\n res = solving(res, x[i], op)\n print(\"S0:\", state)\n print(\"Distance from target: \", abs(target - res))\n print()\n return state, abs(target - res)", "def solve(self, regparam):\n self.regparam = regparam\n \n #Some counters for bookkeeping\n self.stepcounter = 0\n self.flipcounter = 0\n self.nochangecounter = 0\n \n #Cached results\n self.evals = np.multiply(self.svals, self.svals)\n self.newevals = 1. / (self.evals + self.regparam)\n newevalslamtilde = np.multiply(self.evals, self.newevals)\n self.D = np.sqrt(newevalslamtilde)\n #self.D = -newevalslamtilde\n \n self.VTY = self.svecs.T * self.Y\n DVTY = np.multiply(self.D.T, self.svecs.T * self.Y)\n \n #Using lists in order to avoid unnecessary matrix slicings\n self.DVTY_list = []\n self.YTVDDVTY_list = []\n self.classFitnessList = []\n for i in range(self.labelcount):\n DVTY_i = DVTY[:,i]\n self.DVTY_list.append(DVTY_i)\n YTVDDVTY_i = DVTY_i.T * DVTY_i\n self.YTVDDVTY_list.append(YTVDDVTY_i)\n fitness_i = self.size - DVTY_i.T * DVTY_i\n self.classFitnessList.append(fitness_i)\n \n self.Dsvecs_list = []\n self.svecsDDsvecs_list = []\n for i in range(self.size):\n Dsvec = np.multiply(self.D.T, self.svecs[i].T)\n self.Dsvecs_list.append(Dsvec)\n self.svecsDDsvecs_list.append(Dsvec.T*Dsvec)\n \n self.updateA()\n \n \n converged = False\n print(self.classcounts.T)\n if self.callbackfun is not None:\n self.callbackfun.callback(self)\n while True:\n \n converged = self.roundRobin()\n print(self.classcounts.T)\n if self.callbackfun is not None:\n self.callbackfun.callback(self)\n if converged: break\n \n if self.oneclass:\n self.Y = self.Y[:, 0]\n self.A = self.A[:, 0]\n self.results['predicted_clusters_for_training_data'] = self.Y\n self.predictor = self.svdad.createModel(self)", "def postsolve_manual(sol, dname, statelist = 'auto', printdone = 'yes'):\n global vmdict\n if type(statelist) == str:\n statelist = ['P_r', 'P_w', 'C4_nc_wb', 'C4_nc_WC']\n\n for i in range(len(np.array(dxlist))):\n if any([n == dxlist[i] for n in statelist]):\n vmdict[dxlist[i]][dname].append(sol[i][-1])\n ##\n vmdict['err'][dname].append(vmdict['P_w'][dname][-1]/ (vmdict['P_r'][dname][-1] + vmdict['P_w'][dname][-1]))\n vmdict['eta'][dname].append(vmdict['P_w'][dname][-1]/ 
vmdict['P_r'][dname][-1])\n if 'C4_nc_WC' in statelist:\n vmdict['pwc_c4'][dname].append(vmdict['C4_nc_WC'][dname][-1]/ (vmdict['C4_nc_WC'][dname][-1] + vmdict['C4_nc_wb'][dname][-1]))\n ##\n if printdone == 'yes':\n print('finished ' + str(len(vmdict[statelist[0]][dname])) + ' ' + dname)", "def solve(self, gradients):\n return", "def solve(self):\n\n self.queue.add(*self.moved.items)\n self.solving = True\n self.moved.items = []", "def solve(self):\r\n while not self.done():\r\n self.no_open_cells()\r\n self.all_cells_are_mines()\r\n self.no_mines()\r\n if not self.done():\r\n self.obvious_cells()\r\n if not self.done():\r\n made_progress = self.safe_neighbour_difference()\r\n if made_progress:\r\n continue\r\n if not self.done():\r\n made_progress = self.adjacent_combinations()\r\n if made_progress:\r\n continue\r\n return", "def _update_target(self):\n self.target_dqn.load_state_dict(self.dqn.state_dict())", "def actualSolve(self, lp):\n\t\tif lp.isMIP() and self.mip: return self.solve_CBC(lp)\n\t\telse: return self.solve_CLP(lp)", "def update_target_net(self):\n if self.n_steps % self.target_update_interval == 0:\n self.target_q.load_state_dict(self.working_q.state_dict())", "def solve(self):\n self.m.optimize()\n if self.m.status == GRB.OPTIMAL:\n self.solution = self.sol_as_mat()\n return self.solution", "def test_solve_one_player_3(self):\n self.rush_hour_data = rush_hour_data_3\n self.state_data = state_data_3\n self.execute_minimax_single_player()", "def check_sol (statess, bigX, littleX, bigY, littleY):\n\t\tcheckstates = copy.deepcopy(statess)\n\t\tcheckstates[bigX] = littleX\n\t\tcheckstates[bigY] = littleY\n\t\treturnval = constraint_generator(checkstates).get((bigX,bigY), False)\n\t\treturn returnval", "def solvePostNoOverlap(targetNum, defenders, dRewards, dPenalties, dCosts, aTypes, aRewards, aPenalties, q):\n \"\"\"Contains as many dummy targets as defenders, for defenders and attackers\"\"\"\n # Add the extra dummy targets\n _dRewards = copy.deepcopy(dRewards)\n _dPenalties = copy.deepcopy(dPenalties)\n _dCosts = copy.deepcopy(dCosts)\n _aRewards = copy.deepcopy(aRewards)\n _aPenalties = copy.deepcopy(aPenalties)\n for m in defenders:\n for defenderCount in defenders:\n _dRewards[m].append(0)\n _dPenalties[m].append(0)\n _dCosts[m].append(0)\n for lam in aTypes:\n _aRewards[lam].append(0)\n _aPenalties[lam].append(0)\n targetNumWithDummies = len(_dRewards[0])\n targetRange = list(range(targetNumWithDummies))\n attackerActions = targetRange\n # Get the suggestions that occur with no overlap\n overlapPlacements = getPlacements(defenders, targetNumWithDummies)\n placements = list(filter(lambda x: len(set(x)) == len(x), overlapPlacements))\n omegaKeys = getOmegaKeys(aTypes, placements, attackerActions)\n\n # Build the model\n model = Model('PrimalWithOverlap')\n w = model.continuous_var_dict(keys=omegaKeys, lb=0, ub=1, name=\"w\")\n objectiveFunction = sum([q[lam] * sum([w[s,a,lam] * defenderSocialUtility(s,a,defenders,_dRewards,_dCosts,_dPenalties) for s in placements for a in attackerActions]) for lam in aTypes])\n c1 = [sum([w[s,a,lam] * aUtility(s,a,lam,_aPenalties,_aRewards) for s in placements]) \\\n >= sum([w[s,a,lam] * aUtility(s,b,lam,_aPenalties,_aRewards) for s in placements])\n for lam in aTypes for a in attackerActions for b in attackerActions if a != b]\n c1 = [constraint for constraint in c1 if not isinstance(constraint, bool)]\n c1 = model.add_constraints(c1)\n c2 = model.add_constraints([sum([q[lam] * sum([w[s,a,lam] * 
utilityM(d,s,a,m,_dRewards,_dPenalties,_dCosts) for a in attackerActions for s in placements if s[m] == d]) for lam in aTypes]) \\\n >= sum([q[lam] * sum([w[s,a,lam] * utilityM(e,s,a,m,_dRewards,_dPenalties,_dCosts) for a in attackerActions for s in placements if s[m] == d]) for lam in aTypes])\n for m in defenders for d in targetRange for e in targetRange if d!=e])\n c3 = model.add_constraints([sum([w[(s,a,lam)] for s in placements for a in attackerActions]) == 1 for lam in aTypes])\n # Solve the model\n model.maximize(objectiveFunction)\n model.solve()\n # Now that w contains all the outcomes and their probabilities, sum the attacker utilities up.\n utilityPerAttacker = 0\n for k,v in w.items():\n prob = float(v)\n s,a,lam = k\n utilityPerAttacker += aUtility(s,a,lam,_aPenalties,_aRewards) * prob\n utilityPerAttacker /= len(aTypes)\n utilityPerDefender = model.solution.get_objective_value()\n utilityPerDefender /= len(defenders)\n return utilityPerDefender, utilityPerAttacker, None", "def solve(self):\n dim = self.puzzle.dimension\n\n # initial loop\n for value, (row, col) in self.puzzle:\n if value:\n self.clear_row(row, value)\n self.clear_col(col, value)\n self.clear_subgrid(row, col, value)\n self.updates.add((value, (row, col)))\n for ps in self.possibilities:\n ps.discard((row, col))\n\n while self.updates:\n while self.updates:\n # while self.updates:\n value, (row, col) = self.updates.pop()\n for i in range(1, dim + 1):\n self.check_row(i, value)\n self.check_col(i, value)\n for i in range(2, 8, 3):\n self.check_subgrid(row, i, value)\n self.check_subgrid(i, col, value)\n\n for value, (row, col) in self.puzzle:\n if not value:\n self.check_cell(row, col)\n\n # for value in range(1, dim + 1):\n # for row in [2, 5, 8]:\n # for col in [2, 5, 8]:\n # self.check_subgrid(row, col, value)", "def _solve(self, solver):\n self.prob.solve(solver)\n if self.prob.status <= 0:\n raise Exception(\"Infeasible Solution.\")\n return {pid for pid, variable \n in self.player_vars.iteritems()\n if variable.varValue}", "def solve(self):\n\n # Open status display\n fmtstr, nsep = self.display_start()\n\n # Start solve timer\n self.timer.start(['solve', 'solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Main optimisation iterations\n for self.k in range(self.k, self.k + self.opt['MaxMainIter']):\n\n # Update record of X and Y from previous iteration\n self.on_iteration_start()\n\n # Compute backtracking\n if self.opt['Backtrack'] is not None and self.k >= 0:\n self.timer.stop('solve_wo_btrack')\n # Compute backtracking\n self.backtrack.update(self)\n self.timer.start('solve_wo_btrack')\n else:\n # Compute just proximal step\n self.xstep()\n # Update by combining previous iterates\n self.ystep()\n\n # Compute residuals and stopping thresholds\n self.timer.stop(['solve_wo_rsdl', 'solve_wo_btrack'])\n if not self.opt['FastSolve']:\n frcxd, adapt_tol = self.compute_residuals()\n self.timer.start('solve_wo_rsdl')\n\n # Compute and record other iteration statistics and\n # display iteration stats if Verbose option enabled\n self.timer.stop(['solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n if not self.opt['FastSolve']:\n itst = self.iteration_stats(self.k, frcxd)\n self.itstat.append(itst)\n self.display_status(fmtstr, itst)\n self.timer.start(['solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Call callback function if defined\n if self.opt['Callback'] is not None:\n if self.opt['Callback'](self):\n break\n\n # Stop if residual-based stopping tolerances reached\n if not 
self.opt['FastSolve']:\n if frcxd < adapt_tol:\n break\n\n # Increment iteration count\n self.k += 1\n\n # Record solve time\n self.timer.stop(['solve', 'solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Print final separator string if Verbose option enabled\n self.display_end(nsep)\n\n return self.getmin()", "def solve():\n game_state.is_solving = ~game_state.is_solving\n\n if game_state.is_solving:\n solve_button.set_label(\"Pause\")\n else:\n solve_button.set_label(\"Solve\")\n\n game_state.is_dirty = True\n\n return solve", "def actualSolve(self, lp):\n\t\t\traise RuntimeError, \"CPLEX_MEM: Not Available\"", "def solveOneStep(self):\n ### Student code goes here\n if self.currentState.state == self.victoryCondition:\n self.visited[self.currentState]=True\n return True\n return self.BFS()", "def solve_step(self,h,dstep):\n pass", "def solve(self,n_days = 100,init_state = None,start_date = None,d = 1):\n\n # If init state is not given we use I0\n if init_state is None:\n assert self.start_state is not None\n init_state = int(self.I0)\n\n # Transform init_state into state object\n init_state = self.make_state(init_state)\n\n # Safety checks\n tol = 2\n assert hasattr(self,\"compartments\")\n assert len(init_state) == len(self.compartments)\n # assert hasattr(self,\"N\")\n # assert np.abs(init_state.sum() - self.N) < tol,f\"Init state {init_state.values} does not sum to total population {self.N}\"\n assert n_days > self.offset\n \n # Grid of time points (in days)\n # Take offset into account\n offset = self.offset\n t = np.linspace(0, n_days - offset, (n_days - offset +1)*d)\n\n # Integrate the model equations over the time grid, t.\n states = odeint(self.derivative, init_state, t)\n\n # Converts to DataFrame and then to custom object\n states = pd.DataFrame(states,columns = self.compartments)\n if d > 1: \n states.index = states.index / d\n\n # Add offset into account\n if offset > 0:\n states.index = range(offset,n_days + 1)\n states = states.reindex(range(0,n_days + 1))\n states = states.fillna(method = \"bfill\")\n elif offset < 0:\n states.index = [x + offset for x in states.index]\n\n # Convert to custom object\n states = CompartmentStates(states)\n states.build_aggregates(self.states)\n\n # If start date is given, convert to dates\n if self.start_date is not None:\n start_date = self.start_date\n if start_date is not None:\n index = pd.to_datetime(start_date) + pd.TimedeltaIndex(states.index,unit = \"D\")\n states.index = index\n \n return states", "def solve_puzzle(self):\r\n \r\n counter = 0\r\n rows = self._height-1\r\n cols = self._width-1\r\n # print rows, cols\r\n # print 'The greed has %s rows and %s coloumn indexes' %(rows, cols) \r\n solution_move = ''\r\n if self.get_number(0,0) == 0 and \\\r\n self.get_number(0,1) == 1:\r\n # print 'Congrads Puxxle is Aolved at start!!!!!'\r\n return ''\r\n #appropriate_number = (self._height * self._width) - 1\r\n appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'First appropriate_number=',appropriate_number\r\n # print \"Grid first tile that we will solwing has value =\", self._grid[rows][cols]\r\n \r\n while counter < 300:\r\n counter +=1\r\n # print self\r\n #appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'Appropriate number in loop=',appropriate_number\r\n # print 'We are solving %s index_row and %s index_col' %(rows, cols) \r\n ####Case when we use solve_interior_tile\r\n if rows > 1 and cols > 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n cols -= 
1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solving interior tile', (rows, cols)\r\n solution_move += self.solve_interior_tile(rows, cols)\r\n # print 'Solution move=', solution_move\r\n cols -= 1\r\n #### Case when we use solve_col0_tile\r\n elif rows > 1 and cols == 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n cols = self._width-1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solwing tile 0 in row', rows\r\n # print 'Appropriate number here ='\r\n solution_move += self.solve_col0_tile(rows)\r\n # print 'Solution move=', solution_move\r\n rows -=1\r\n cols = self._width-1\r\n\r\n\r\n #### Cases when we use solve_row0_tile\r\n elif rows == 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n #cols = self._width-1\r\n appropriate_number -= self._width\r\n\r\n else:\r\n # print 'Solving upper 2 rows right side'\r\n solution_move += self.solve_row1_tile(cols)\r\n rows -=1\r\n appropriate_number -= self._width\r\n #### Cases when we use solve_row1_tile \r\n if rows < 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows += 1\r\n cols -= 1\r\n appropriate_number +=self._width-1\r\n else:\r\n # print '(1,J) tile solved, lets solwe tile (0,j) in tile',(rows,cols)\r\n # print 'Greed after move solve_row1_tile'\r\n # print self\r\n solution_move += self.solve_row0_tile(cols)\r\n rows +=1\r\n cols -=1\r\n appropriate_number +=self._width-1\r\n\r\n\r\n #### Case when we use solve_2x2\r\n elif rows <= 1 and cols <= 1:\r\n # print 'We are solving 2x2 puzzle'\r\n solution_move += self.solve_2x2()\r\n if self._grid[0][0] == 0 and \\\r\n self._grid[0][1] == 1:\r\n # print 'Congrads Puxxle is SOLVED!!!!!'\r\n break\r\n\r\n\r\n\r\n\r\n if counter > 100:\r\n # print 'COUNTER BREAK'\r\n break\r\n # print solution_move, len(solution_move)\r\n return solution_move\r\n\r\n\r\n\r\n\r\n\r\n\r\n # for row in solution_greed._grid[::-1]:\r\n # print solution_greed._grid\r\n # print 'Row =',row\r\n \r\n # if solution_greed._grid.index(row) > 1:\r\n # print \"Case when we solwing Interior and Tile0 part\"\r\n \r\n\r\n # for col in solution_greed._grid[solution_greed._grid.index(row)][::-1]:\r\n # print 'Coloumn value=', col\r\n #print row[0]\r\n # if col !=row[0]:\r\n # print 'Case when we use just Interior tile solution'\r\n # print solution_greed._grid.index(row)\r\n # print row.index(col)\r\n \r\n # solution += solution_greed.solve_interior_tile(solution_greed._grid.index(row) , row.index(col))\r\n # print 'Solution =', solution\r\n # print self \r\n # print solution_greed._grid\r\n # elif col ==row[0]:\r\n # print 'Case when we use just Col0 solution'\r\n\r\n # else:\r\n # print 'Case when we solwing first two rows'\r\n\r\n #return \"\"\r", "def call(self, states):\n # TODO: implement this ~\n l1 = tf.nn.relu(self.Q_1(states))\n l2 = tf.nn.relu(self.Q_2(l1))\n qVals = self.Q_3(l2)\n return qVals\n # return tf.argmax(qVals, 1)", "def _update_targets(self):\n for ga_main, ga_targ in zip(self.ga.variables, self.ga_.variables):\n ga_targ.assign(self._polyak * ga_targ + (1 - self._polyak) * ga_main)\n if self.use_lyapunov:\n for lc_main, lc_targ in zip(self.lc.variables, self.lc_.variables):\n lc_targ.assign(self._polyak * lc_targ + (1 - self._polyak) * lc_main)\n else:\n for q_1_main, q_1_targ in zip(self.q_1.variables, self.q_1_.variables):\n q_1_targ.assign(self._polyak * 
q_1_targ + (1 - self._polyak) * q_1_main)\n for q_2_main, q_2_targ in zip(self.q_2.variables, self.q_2_.variables):\n q_2_targ.assign(self._polyak * q_2_targ + (1 - self._polyak) * q_2_main)", "def solveOneStep(self):\n ### Student code goes here\n # Mark this move as explored\n self.visited[self.currentState] = True\n\n # Get move to make\n movables = self.gm.getMovables()\n # print(\"EXPLORING GAME STATE \" + str(self.gm.getGameState()) + \"---------------------------------------------------------\")\n to_move = self.currentState.nextChildToVisit # movables index\n # print(\"depth \", self.currentState.depth)\n\n # Return if done\n if self.currentState.state == self.victoryCondition:\n # print(\"DONE\")\n return True\n\n while to_move < len(movables):\n # Make the move\n movable_statement = movables[to_move]\n # print(\"implementing move \", movable_statement)\n self.gm.makeMove(movable_statement)\n\n # Create a new state with this move made\n new_state = self.gm.getGameState()\n\n # Find out if this state has already been explored\n visited = False\n for visited_state in self.visited.keys():\n if visited_state.state == new_state:\n visited = True\n\n # If the new state hasn't been visited then add it as a child then move down to this child\n if not visited:\n new_gs = GameState(new_state, self.currentState.depth + 1, movable_statement)\n new_gs.parent = self.currentState\n self.currentState.children.append(new_gs)\n self.currentState.nextChildToVisit = to_move + 1\n self.currentState = new_gs\n break\n\n # Else skip this state and try going to the next movable statement\n else:\n # print(\"SKIP THIS STATE\")\n self.gm.reverseMove(movable_statement)\n to_move += 1\n\n # Went all the way down to a leaf, backtrack\n if (to_move >= len(movables)):\n self.gm.reverseMove(self.currentState.requiredMovable)\n self.currentState = self.currentState.parent\n\n return False", "def solve_environment(self):\n \n #The first problem formulation\n #K kinds of towers\n #See more details about problem formulation in the writeup \n \n #Get a full matrix of the concatenated coverage matrices for \n #each tower type. THis new matrix has dimensions:\n #(Ntowers) x (sum(potential sites)), where the sum o=is over all tower types\n coverage = np.hstack(i for i in self.coverage_matrices)\n print coverage\n print coverage.shape \n \n #Diagonal matrix of the values of each target\n #(for the scenarios where we don't care about maximizing covered value,\n #target_values is just all ones, so this is just the identity matrix)\n V = np.diag(self.target_values)\n \n #If doing scenario where we want to fortify weakest link, only makes\n #sense if all targets are equal value:\n if self.objective_type == 'min_entries':\n V = np.eye(len(self.target_values))\n\n #Get the matrix of coverage values / expected value saved:\n C = np.dot(V,coverage)\n print 'V', V\n print 'coverage', coverage\n print 'C', C\n \n \n #Since not gauranteed to reach global optimum on any particular initialization,\n #run a few times and take the best result.\n #Just define \"best result\" as the result which had the most overall \n #\"converged\" x, combined over all tower kinds. \n# for j in xrange(self.N_random_starts_max):\n \n \n a = 2. #1.\n tau = 1e-4\n N = sum(i for i in self.N_tower_sites)\n w = np.zeros(N)\n ones = np.ones(N)\n p = 1. 
#the exponents power when doing he exponent method:\n \n for i in xrange(self.N_reweighting_iterations_max):\n #The concatenated vector of occupancies: Concatenated over all\n #of the kinds of towers.\n x = cvx.Variable(N)\n \n #Different objective functions depending on which optimization problem.\n #These are defined in the scenarios in the main function.\n if self.objective_type == 'min_entries':\n operation = cvx.min_entries\n elif self.objective_type == 'sum_entries':\n operation = cvx.sum_entries\n else:\n raise Exception('must specify valid objective_type')\n \n #Objective function includes penalty term for non-binary x values\n if self.penalty_type == 'reweighted_L1':\n #objective = cvx.Maximize(t - x.T*w)\n objective = cvx.Maximize(operation(C*x - x.T*w))\n\n\n #Main constraints on 0<=x<=1\n constraints = [0<=x, x<=1]\n \n \n #And then for each kind of tower, append the constraint that there\n #be exactly N_i towers, or <= quota (depending on constraint type)\n if self.constraints__type == 'fixed_N_towers' or self.constraints__type == 'tower_quotas':\n for tk in xrange(self.N_tower_kinds):\n before_sum = np.concatenate(([0],np.cumsum(self.N_tower_sites)))[tk]\n print before_sum\n print before_sum + self.N_tower_sites[tk]\n if self.constraints__type == 'fixed_N_towers':\n constraints.append(cvx.sum_entries(\n x[before_sum : before_sum + self.N_tower_sites[tk]]\n )==self.N_towers[tk])\n elif self.constraints__type == 'tower_quotas':\n constraints.append(cvx.sum_entries(\n x[before_sum : before_sum + self.N_tower_sites[tk]]\n )<=self.budget__tower_quotas[tk])\n print x[before_sum : before_sum + self.N_tower_sites[tk]]\n \n elif self.constraints__type == 'total_cost':\n costs = np.hstack([np.repeat(self.budget__tower_unit_costs[tk],self.N_tower_sites[tk]) for tk in xrange(self.N_tower_kinds)])\n constraints.append(cvx.sum_entries(costs * x) <= self.budget__total_cost) \n \n \n \n\n\n \n \n print 'penalty_type', self.penalty_type\n print 'objective_type', self.objective_type\n print 'constraints__type', self.constraints__type\n print 'budget__tower_quotas', self.budget__tower_quotas\n print 'operation', operation\n print 'objective', objective\n print 'constraints', constraints\n cvx.Problem(objective, constraints).solve(verbose=self.VERBOSE)\n x = np.array(x.value).flatten()\n print 'x', x\n w = a/(tau+np.abs(x))\n p += 1.\n plt.figure(figsize=(5,5))\n plt.plot(x,marker='o')\n plt.savefig('histrograms_{}.png'.format(i))\n print \n \n \n \n \n #From the solution x, get the coordinates of those tower sites where we\n #really do want to place a tower\n #use = np.isclose(x,1.)\n for tk in xrange(self.N_tower_kinds):\n before_sum = np.concatenate(([0],np.cumsum(self.N_tower_sites)))[tk]\n y = x[before_sum : before_sum + self.N_tower_sites[tk]]\n inds = np.argsort(y)\n s = y[inds]\n use = np.where(s>.5)[0]\n print inds\n print s\n print use \n if self.constraints__type == 'fixed_N_towers':\n if len(use) != self.N_towers[tk]:\n print 'Solution did not converge properly. 
Choosing the K best towers.'\n print self.N_towers[tk], len(use)\n # use = use[-self.N_towers[tk]:]\n use = inds[-self.N_towers[tk]:]\n elif self.constraints__type == 'tower_quotas':\n pass #Just use the towers thresholded at > .5\n print use\n \n \n self.coordinates__solved_towers.append([self.coordinates__tower_sites[tk][mm] for mm in inds[use]])", "def solveOneStep(self):\n ### Student code goes here\n state = self.currentState\n #print (type(state))\n self.visited[state] = True\n #print (type(self.gm.getGameState()))\n moves = self.gm.getMovables()\n print (\"CURRENTSTATE\" + str(self.currentState.state))\n print (\"MOVABLES:\")\n if moves:\n for m in moves:\n print (str(m))\n print (\"CHILDINDEX:\")\n print (state.nextChildToVisit)\n print (\"*********\")\n if state.state == self.victoryCondition:\n return True\n #if no child to expand then go back\n if not moves or state.nextChildToVisit >= len(moves):\n self.currentState = state.parent\n if state.requiredMovable is not None:\n self.gm.reverseMove(state.requiredMovable)\n # expand\n else:\n\n next_move = moves[state.nextChildToVisit]\n self.gm.makeMove(next_move)\n state.nextChildToVisit += 1\n\n #if to parent or if visited then skip\n while (((state.parent is not None) and (self.gm.getGameState() == state.parent.state))) or GameState(self.gm.getGameState(), 0, None) in self.visited:\n print (\"PARENT FOUND!\")\n self.gm.reverseMove(next_move)\n if state.nextChildToVisit >= len(moves):\n self.currentState = state.parent\n return False\n else:\n next_move = moves[state.nextChildToVisit]\n self.gm.makeMove(next_move)\n state.nextChildToVisit += 1\n\n next_state = GameState(self.gm.getGameState(), state.depth + 1, next_move)\n next_state.parent = state\n #next_state.requiredMovable = next_move\n state.children.append(next_state)\n self.currentState = next_state\n print (state.nextChildToVisit)\n return False", "def _solve(self) -> CasADiArrayType:\n solver_input = {\"x0\": self.x0, \"p\": self.p}\n if self.opt_type in CONSTRAINED_OPT:\n solver_input[\"lbg\"] = self._lbg\n solver_input[\"ubg\"] = self._ubg\n self._solution = self._solver(**solver_input)\n self._stats = self._solver.stats()\n self._stats[\"solution\"] = self._solution\n return self._solution[\"x\"]", "def solveOneStep(self):\n ### Student code goes here\n if self.first_step == False:\n self.first_step = True\n if self.solveOneStep():\n return True\n if self.queue:\n self.gm_init()\n ele = self.queue.get()\n #print (len(ele))\n state = ele[0]\n premoves = ele[1]\n\n for m in premoves:\n self.gm.makeMove(m)\n if state.state == self.victoryCondition:\n return True\n self.visited[state] = True\n print(\"CURRENTSTATE:\")\n print(self.gm.getGameState())\n print(\"*******\")\n moves = self.gm.getMovables()\n for m in moves:\n self.gm.makeMove(m)\n if (((state.parent is not None) and (self.gm.getGameState() == state.parent.state))) or GameState(self.gm.getGameState(), 0, None) in self.visited:\n self.gm.reverseMove(m)\n continue\n self.visited[GameState(self.gm.getGameState(), 0, None)] = True\n new_pmv = [i for i in premoves]\n new_pmv.append(m)\n next_state = GameState(self.gm.getGameState(), state.depth+1, m)\n next_state.parent = state\n state.children.append(next_state)\n self.queue.put([next_state, new_pmv])\n self.gm.reverseMove(m)\n self.currentState = state\n\n #for i in range(len(premoves)-1, -1, -1):\n # mv = premoves[i]\n # self.gm.reverseMove(mv)\n return False", "def spreadOutAndFindDot(self, gameState):\n # Here are some useful elements of the startState\n 
currentPosition = gameState.getPacmanPosition(self.index)\n foodList = gameState.getFood().asList()\n walls = gameState.getWalls()\n randomFood = []\n problem = []\n\n #problem = AnyFoodSearchProblem(gameState, self.index)\n\n # if min(manhattan(currentPosition, foodPosition) for foodPosition in food.asList()) > 10:\n # return [Directions.STOP]\n #print(\"self.targets = \", self.targets)\n if self.index == 0:\n TargetFood = ClosestFood(currentPosition, foodList)\n #self.targets.append(TargetFood)\n problem = PositionSearchProblem(gameState, 0, goal=TargetFood, start=currentPosition, warn=False, visualize=False)\n return search.aStarSearch(problem, manhattanHeuristic)\n if self.index == 1:\n TargetFood = ClosestFood(currentPosition, foodList)\n \"\"\"\n want to find a way to avoid both agents coming up with the same target. But the below doesn't work because\n each agent has their own self.targets. How to keep a common list of targets?\n \"\"\"\n # if TargetFood in self.targets:\n # tempFoodList = foodList.copy()\n # tempFoodList.pop(tempFoodList.index(TargetFood))\n # TargetFood = ClosestFood(currentPosition, tempFoodList)\n # self.targets.append(TargetFood)\n # else:\n # self.targets.append(TargetFood)\n problem = PositionSearchProblem(gameState, 1, goal=TargetFood, start=currentPosition, warn=False, visualize=False)\n return search.aStarSearch(problem, manhattanHeuristic)\n if self.index == 2:\n TargetFood = RandomFood(currentPosition, foodList)\n problem = PositionSearchProblem(gameState, 2, goal=TargetFood, start=currentPosition, warn=False, visualize=False)\n return search.aStarSearch(problem, manhattanHeuristic)\n if self.index == 3:\n TargetFood = RandomFood(currentPosition, foodList)\n problem = PositionSearchProblem(gameState, 3, goal=TargetFood, start=currentPosition, warn=False, visualize=False)\n return search.aStarSearch(problem, manhattanHeuristic)\n #return search.bfs(problem)\n\n #util.raiseNotDefined()", "def solve(self):\n while self.counter[-1] != len(self.sequences[-1]) + 1:\n basepair = self.generatebasepairs(self.counter) # Get the combination for the current coordination\n moves = self.generatemoves(basepair) # Get all possible ways to get to this current coordination\n\n maxscore = -100000000 # set the maxscore to a value which is always lower than possible scores\n bestmove = None\n\n # FOr each move calculate score\n for move in moves:\n coordinates = self.generatecoordinates(move, self.counter) # generate the origin coordinate for the current move\n score = self.retrievematrixelement(coordinates).score # Get the score at the origin coordinate\n pairs = self.getallpairs(move) # Get all pairs possible for the current move\n scores = [self.scorePair(u) for u in pairs] # Generate scores for all pairs\n newscore = score + sum(scores) # Add generated scores to origin score\n if newscore > maxscore:\n maxscore = newscore\n bestmove = coordinates\n\n self.enterelement(self.counter, Score(bestmove, maxscore))\n self.increase()", "def calculate_appropriate_target(self):\n pass", "def calculate_appropriate_target(self):\n pass", "def solve_puzzle(self):\r\n # initialize some values and start tile at bottom right corner\r\n col = self.get_width() - 1\r\n row = self.get_height() - 1\r\n move = \"\"\r\n curr_row, curr_col = self.current_position(0, 0)\r\n move_to_target = abs(curr_col - col) * \"r\" + abs(curr_row - row) * \"d\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n\r\n # apply solver methods\r\n for dummy_i in range(row, 1, -1):\r\n for dummy_j 
in range(col, 0, -1):\r\n move += self.solve_interior_tile(dummy_i, dummy_j)\r\n move += self.solve_col0_tile(dummy_i)\r\n \r\n for dummy_j in range(col, 1, -1):\r\n move += self.solve_row1_tile(dummy_j)\r\n move += self.solve_row0_tile(dummy_j)\r\n \r\n move += self.solve_2x2()\r\n return move", "def is_solvable(self):\n self_copy = deepcopy(self)\n return self_copy.solve()", "def is_solved(self):\n raise NotImplementedError()", "def postsolve_array(sol, dname):\n tmp_sol = {}\n for i in range(len(np.array(dxlist))):\n tmp_sol[dxlist[i]] = sol[i][-1] ## I anyway need to get all states to properly parse 1d sol\n ##\n sol_out = {} ## main dict\n for p in dname:\n if any([n == p for n in tmp_sol.keys()]):\n sol_out[p] = tmp_sol[p] ## for states, directly take their end-point population\n elif p == 'err':\n sol_out[p] = tmp_sol['P_w'] / (tmp_sol['P_r'] + tmp_sol['P_w']) ## error rate\n elif p == 'eta':\n sol_out[p] = tmp_sol['P_w'] / (tmp_sol['P_r']) ## error rate\n elif p == 'pwc_c4':\n sol_out[p] = tmp_sol['C4_nc_WC'] / (tmp_sol['C4_nc_WC'] + tmp_sol['C4_nc_wb']) ## population of WC in C4\n elif p == 'pwc_c3':\n sol_out[p] = tmp_sol['C3_nc_WC'] / (tmp_sol['C3_nc_WC'] + tmp_sol['C3_nc_wb']) ## population of WC in C3\n elif p == 'pnc_wc':\n sol_out[p] = tmp_sol['C4_nc_WC']/ (tmp_sol['C4_nc_WC'] + tmp_sol['C4_c']) ## population of C4_WC related to C4_C\n elif p == 'pnc':\n sol_out[p] = (tmp_sol['C4_nc_WC'] + tmp_sol['C4_nc_wb']) / (tmp_sol['C4_nc_WC'] + tmp_sol['C4_nc_wb'] + tmp_sol['C4_c'])\n elif p == 'pnc_c3':\n sol_out[p] = (tmp_sol['C3_nc_WC'] + tmp_sol['C3_nc_wb']) / (tmp_sol['C3_nc_WC'] + tmp_sol['C3_nc_wb'] + tmp_sol['C3_c'])\n else:\n print('no formula for property %(p)s was provided!' % vars())\n sol_out[p] = np.nan\n ##\n return sol_out", "def actualSolve(self, lp):\n\t\t\traise RuntimeError, \"COIN_MEM: Not Available\"", "def solve(self):\r\n queue = collections.deque([Node(self.start)])\r\n seen = set()\r\n seen.add(queue[0].state)\r\n while queue:\r\n queue = collections.deque(sorted(list(queue), key=lambda node: node.f))\r\n node = queue.popleft()\r\n if node.solved:\r\n return node.path\r\n\r\n for move, action in node.actions:\r\n child = Node(move(), node, action)\r\n\r\n if child.state not in seen:\r\n queue.appendleft(child)\r\n seen.add(child.state)", "def solve(self):\n # check for jacobian and set it if present and to be used\n if self.use_sparse:\n if self._use_jac and hasattr(self.problem,'sparse_jac'):\n jac = self.problem.sparse_jac\n else:\n jac = None\n else:\n if self._use_jac and hasattr(self.problem,'jac'):\n jac = self.problem.jac\n else:\n jac = None\n \n # Initialize solver and solve \n \n solved = False\n local_min = False\n\n res = N.zeros(self.x0.__len__())\n while (not solved) and self.reg_count < 2:\n try:\n if self._use_fscale:\n self.solver.KINSOL_init(self.func,self.x0,self.dim,jac,self.constraints,self.use_sparse,self.verbosity,self.norm_of_res,self.reg_param,self.fscale)\n else:\n self.solver.KINSOL_init(self.func,self.x0,self.dim,jac,self.constraints,self.use_sparse,self.verbosity,self.norm_of_res,self.reg_param,None)\n start = time.clock()\n res = self.solver.KINSOL_solve(not self._use_ls)\n stop = time.clock()\n self.exec_time += (stop - start)\n solved = True\n except KINError as error:\n if error.value == 42:\n # Try the heuristic\n if hasattr(self.problem, 'get_heuristic_x0'):\n print \"----------------------------------------------------\"\n print \" Solver stuck with zero step-length.\"\n print 
\"----------------------------------------------------\"\n print \"The following variables have start value zero\"\n print \"and min set to zero causing the zero step-lenght.\"\n print \"These settings are either set by default or by user.\"\n print \"\"\n\n self.x0 = self.problem.get_heuristic_x0()\n self.reg_count += 1\n \n print \"\"\n print \"This setting (start and min to zero) can often\"\n print \"cause problem when initializing the system. \"\n print \"\"\n print \"To avoid this the above variables have\"\n print \"their start attributes reset to one.\"\n print \"\"\n print \"Trying to solve the system again...\"\n else:\n raise KINSOL_Exception(\"Regularization failed due to constraints, tried getting heuristic initial guess but failed.\")\n \n\n elif (error.value == 2):\n print \"---------------------------------------------------------\"\n print \"\"\n print \" !!! WARNING !!!\"\n print \"\"\n print \" KINSOL has returned a result but the algorithm has converged\"\n print \" to a local minima, the initial values are NOT consistant!\"\n print \"\"\n print \"---------------------------------------------------------\"\n solved = True\n local_min = True\n else:\n # Other error, send onward as exception\n self.problem.check_constraints(res)\n raise KINSOL_Exception(error.msg[error.value])\n \n if not solved:\n self.solver.Free_KINSOL()\n raise KINSOL_Exception(\"Algorithm exited solution loop without finding a solution, please contact Assimulo support.\")\n\n if self.check_with_model:\n self.problem.check_constraints(res)\n if not local_min:\n print \"Problem sent to KINSOL solved.\"\n \n return res", "def solve(n_vec, m_vec, p_vec, repeat, dns_level, seed, solver='gurobi'):\n\n print(\"Solving random problems with solver %s\\n\" % solver)\n\n # Define statistics to record\n std_solve_time = np.zeros(len(n_vec))\n avg_solve_time = np.zeros(len(n_vec))\n min_solve_time = np.zeros(len(n_vec))\n max_solve_time = np.zeros(len(n_vec))\n\n n_prob = len(n_vec)\n\n # Store also OSQP time\n if solver == 'miosqp':\n # Add OSQP solve times statistics\n avg_osqp_solve_time = np.zeros(len(n_vec))\n\n # reset random seed\n np.random.seed(seed)\n\n for i in range(n_prob):\n\n # Get dimensions\n n = n_vec[i]\n m = m_vec[i]\n p = p_vec[i]\n\n print(\"problem n = %i, m = %i, p = %i\" % (n, m, p))\n\n # Define vector of cpu times\n solve_time_temp = np.zeros(repeat)\n\n # Store also OSQP time\n if solver == 'miosqp':\n osqp_solve_time_temp = np.zeros(repeat)\n\n for j in tqdm(range(repeat)):\n # for j in range(repeat):\n\n # Generate random vector of indeces\n i_idx = np.random.choice(np.arange(0, n), p, replace=False)\n\n # Generate random Matrices\n Pt = spa.random(n, n, density=dns_level)\n P = spa.csc_matrix(np.dot(Pt, Pt.T))\n q = sp.randn(n)\n A = spa.random(m, n, density=dns_level)\n u = 2 + sp.rand(m)\n l = -2 + sp.rand(m)\n\n # Enforce [0, 1] bounds on variables\n i_l = np.zeros(p)\n i_u = np.ones(p)\n # A, l, u = miosqp.add_bounds(i_idx, 0., 1., A, l, u)\n\n if solver == 'gurobi':\n # Solve with gurobi\n prob = mpbpy.QuadprogProblem(P, q, A, l, u, i_idx, i_l, i_u)\n res_gurobi = prob.solve(solver=mpbpy.GUROBI,\n verbose=False, Threads=1)\n if res_gurobi.status != 'optimal':\n import ipdb\n ipdb.set_trace()\n solve_time_temp[j] = 1e3 * res_gurobi.cputime\n\n elif solver == 'miosqp':\n # Define problem settings\n miosqp_settings = {\n # integer feasibility tolerance\n 'eps_int_feas': 1e-03,\n # maximum number of iterations\n 'max_iter_bb': 1000,\n # tree exploration rule\n # [0] depth first\n # 
[1] two-phase: depth first until first incumbent and then best bound\n 'tree_explor_rule': 1,\n # branching rule\n # [0] max fractional part\n 'branching_rule': 0,\n 'verbose': False,\n 'print_interval': 1}\n\n osqp_settings = {'eps_abs': 1e-03,\n 'eps_rel': 1e-03,\n 'eps_prim_inf': 1e-04,\n 'verbose': False}\n\n model = miosqp.MIOSQP()\n model.setup(P, q, A, l, u, i_idx, i_l, i_u,\n miosqp_settings,\n osqp_settings)\n res_miosqp = model.solve()\n\n # DEBUG (check if solutions match)\n # prob = mpbpy.QuadprogProblem(P, q, A, l, u, i_idx, i_l, i_u)\n # res_gurobi = prob.solve(solver=mpbpy.GUROBI, verbose=False)\n # if (np.linalg.norm(res_gurobi.x - res_miosqp.x) /\n # np.linalg.norm(res_gurobi.x)) > 1e-02:\n # import ipdb; ipdb.set_trace()\n#\n # import ipdb; ipdb.set_trace()\n\n if res_miosqp.status != miosqp.MI_SOLVED:\n import ipdb\n ipdb.set_trace()\n \n # Solution time \n solve_time_temp[j] = 1e3 * res_miosqp.run_time\n\n # Store OSQP time in percentage\n if solver == 'miosqp':\n osqp_solve_time_temp[j] = \\\n 100 * (res_miosqp.osqp_solve_time / res_miosqp.run_time)\n\n # Get time statistics\n std_solve_time[i] = np.std(solve_time_temp)\n avg_solve_time[i] = np.mean(solve_time_temp)\n max_solve_time[i] = np.max(solve_time_temp)\n min_solve_time[i] = np.min(solve_time_temp)\n\n # Store also OSQP time\n if solver == 'miosqp':\n avg_osqp_solve_time[i] = np.mean(osqp_solve_time_temp)\n\n # Create pandas dataframe for the results\n df_dict = {'n': n_vec,\n 'm': m_vec,\n 'p': p_vec,\n 't_min': min_solve_time,\n 't_max': max_solve_time,\n 't_avg': avg_solve_time,\n 't_std': std_solve_time}\n\n # Store also OSQP time\n if solver == 'miosqp':\n df_dict.update({'t_osqp_avg': avg_osqp_solve_time})\n\n timings = pd.DataFrame(df_dict)\n\n return timings", "def solve(puzzle):\n print(\"Solving...\")\n array_puzzle = np.asarray(puzzle)\n array_puzzle.flags.writeable = False # Turn off writable flags to prevent data being ovewritten accidentally.\n goal_state = __generate_goal(len(array_puzzle[0]), len(array_puzzle))\n\n flat_puzzle = list(chain.from_iterable(puzzle)) # Flatten the list\n\n # If the puzzle doesn't contain 0, exit.\n try:\n flat_puzzle.remove(0) # Remove 0 from the list\n except:\n print(\"All puzzles must include an open tile (0).\")\n return None\n\n inversions = __count_inversions(flat_puzzle) # Count the inversions\n\n # width = len(array_puzzle[0]) # Get the width of the puzzle (columns)\n # length = len(array_puzzle) # Get the length of the puzzle (rows)\n\n oddEven = __odd_or_even(len(array_puzzle[0])) # Determine if the width is odd or even.\n start_position = __find_start(array_puzzle) # Find the start position's row\n solvable = __is_solvable(oddEven, inversions, len(array_puzzle), start_position) # Cleck if the puzzle is solvable.\n\n # If the puzzle is not solvable, return None.\n if(solvable == \"None\"):\n return None\n\n # If we cannot calculate a* (for example the given values are not all in sequential order (1-5) 4 is replaced by 6 (1,2,3,5,6))\n try:\n return __a_star(array_puzzle, goal_state)\n except:\n print(\"Please make sure there are no duplicate or skipped inputs.\")\n return None\n\n # This code was used in testing to print out the string.\n # solved = __a_star(array_puzzle, goal_state)\n # Return the moves needed to complete the puzzle.\n # return print(str(__build_string(solved)) + \" (\" + str(len(solved)) + \")\")", "def make_target(self, state_index, traj):\n\n # The value target is the discounted root value of the search tree N steps\n # into the future, 
plus the discounted sum of all rewards until then.\n targets = []\n root_values = traj[\"root_value\"]\n rewards = traj[\"reward\"]\n child_visits = traj[\"child_visits\"]\n target_value = traj[\"target_value\"]\n obs = traj[\"cur_state\"]\n\n for current_index in range(state_index, state_index + self.unroll_step + 1):\n\n if current_index < len(root_values):\n targets.append((target_value[current_index], rewards[current_index], child_visits[current_index]))\n else:\n # States past the end of games are treated as absorbing states.\n targets.append((0, 0, []))\n return targets", "def solve(m):\n\t\n #with the assumption that at least one terminal state is given:\n if(len(m)==2 or len(m)==1): return [1,1]\n \n #Normalizing the in. matrix and identifying the trans./abs. states:\n m = normalizeProbabilityMatrix(m)\n t = getTransientStates(m)\n a = getAbsorbingStates(m)\n\t\n if len(a) >0:\n print( str(len(a)) + \" absorbing state\" + (\"\" if len(a)<=1 else \"s\" ))\n else:\n print(\"No absorbing state detected\")\n return\n \n #Getting the matrices Q and R as in the canonical form:\n Q = getQ(m,t)\n R = getR(m,t,a)\n I = getIdentity(len(Q))\n I_Q = subtractMatrices(I, Q)\n \n #Getting the fundamental matrix\n N = invertMatrix(I_Q)\n F = multiplyMatrices(N,R)\n \n #packing the result with a common denominator:\n gcd = getGCD(F[0]).denominator\n res=[]\n sum = 0\n for r in F[0]:\n val = int(r.numerator*(gcd/r.denominator))\n sum+=val\n res.append(val)\n res.append(sum) \n return res", "def solve(self):\n # Use a trivial tour (1-2-3-...-N-1) to set the global upper bound.\n tour = list(range(self._N))\n upper_bound = sum([self._G[i][(i + 1) % self._N] for i in range(self._N)])\n trace = []\n\n # Start from a configuration with a single vertex.\n frontier = [BranchAndBoundConfiguration(self._G, self._N, [0], LOWER_BOUND_METHOD)]\n\n # Set the start time.\n start_time = time.time()\n\n # Branch and bound until the frontier set is empty or the time has expired.\n while frontier and (time.time() - start_time) < self._cutoff_time:\n # Fetch the most promising configuration.\n config = heappop(frontier)\n\n # Expand configuration by appending a vertex to the path.\n for v in range(self._N):\n try:\n expanded_config = config.expand(v)\n except ValueError:\n # Expanded configuration is not valid.\n continue\n if expanded_config.is_solution():\n # Update the global upper bound, if needed.\n this_solution = expanded_config.get_cycle_cost()\n if this_solution < upper_bound:\n # Log it.\n trace.append((time.time() - start_time, this_solution))\n # Update the best solution.\n upper_bound = this_solution\n tour = list(expanded_config.get_path())\n elif expanded_config.get_lower_bound() < upper_bound:\n # Add to the frontier set.\n heappush(frontier, expanded_config)\n return (upper_bound, [self._index_to_id[v] for v in tour], trace)", "def get_ddqn_targets(qsa_target, q_targets, mask, estimator, next_states):\n with torch.no_grad():\n next_q_values = estimator(next_states)\n argmax_actions = next_q_values.max(1, keepdim=True)[1]\n qsa_target[mask] = q_targets.gather(1, argmax_actions)\n return qsa_target", "def solve(self,**kwargs) :\n\t\tlogging.debug('Beginning AR solver')\t\n\t\n\t\t# List of trackers for all experiments\n\t\tself.all_exp_art = []\t\n\n\t\t# Execute the AR core\n\t\tfor expid,exp in enumerate(self.ss.experiments) : \n\t\t\t\tself.exp_art = {} # AR tracker for a single experiments\n\t\t\t\tself.exp_art['id'] = expid+1\n\t\t\t\tself.exp_art['eqns'] = 
[]\n\t\t\t\tself._core(exp,**kwargs)\n\t\t\t\tself.all_exp_art.append(self.exp_art)\n\n\t\t#Run post processing steps\n\t\tself._postprocessor()", "def run_agent(self):\n '''\n The child class is created in order to override the value method which will take into account\n previous states of the agent. The reward function is made exponential rather than linear, which\n decreases the probability of agent visiting the state which was already visited.\n '''\n\n class GraphProblemSpecialised(GraphProblem):\n def __init__(self, initial, goal, graph, reward_dict, already_visited_reward):\n GraphProblem.__init__(self, initial, goal, graph)\n self.rewards = reward_dict\n self.already_visited_reward = already_visited_reward\n\n def value(self, state, history):\n if state in history.keys():\n return self.rewards[state] - (self.already_visited_reward ** history[state])\n return self.rewards[state]\n\n # -- Load and init mission --#\n print('Generate and load the ' + self.mission_type + ' mission with seed ' + str(\n self.mission_seed) + ' allowing ' + self.AGENT_MOVEMENT_TYPE + ' movements')\n mission_xml = init_mission(self.agent_host, self.agent_port, self.AGENT_NAME, self.mission_type,\n self.mission_seed, self.AGENT_MOVEMENT_TYPE)\n self.solution_report.setMissionXML(mission_xml)\n self.solution_report.start()\n\n # -- Get the state of the world along with internal state... --#\n state_t = self.agent_host.getWorldState()\n\n # -- Main loop: --#\n while state_t.is_mission_running:\n state_t = self.agent_host.getWorldState()\n\n # -- Check if anything went wrong along the way --#\n for error in state_t.errors:\n print(\"Error:\", error.text)\n\n # -- Oracle and Internal Sensors--#\n if state_t.number_of_observations_since_last_state > 0: # Has any Oracle-like and/or internal sensor observations come in?\n msg = state_t.observations[-1].text # Get the details for the last observed state\n oracle_and_internal = json.loads(msg) # Parse the Oracle JSON\n # print oracle_and_internal\n # -- Oracle sensor --#\n grid = oracle_and_internal.get(u'grid', 0) # Demo only, string with the\n # -- GPS-like sensor --#\n xpos = oracle_and_internal.get(u'XPos', 0) # Demo only, position in 2D plane, 1st axis\n zpos = oracle_and_internal.get(u'ZPos', 0) # Demo only, position in 2D plane, 2nd axis (yes Z!)\n ypos = oracle_and_internal.get(u'YPos', 0) # Demo only, height as measured from surface! 
(yes Y!)\n\n current_location_id = self._get_current_location_id((xpos, zpos))\n\n if self.history.get(current_location_id):\n self.history[current_location_id] += 1\n else:\n self.history[current_location_id] = 1\n\n if self.first_time_flag:\n start_location = current_location_id\n\n # Gets the possible actions and checks for goal block\n state_space_actions, state_space_locations, reached_goal = self._get_local_state_space_actions(grid,\n [xpos,\n zpos])\n\n if reached_goal:\n break\n\n self.rewards_by_location_id, self.previous_rewards_total = self._get_rewards(\n self.rewards_by_location_id,\n state_space_actions,\n state_space_locations,\n oracle_and_internal,\n self.previous_rewards_total)\n\n # Creates a graph for local (neighbourhood) actions\n local_map = UndirectedGraph(state_space_actions)\n local_problem = GraphProblemSpecialised(initial=start_location, goal=None, graph=local_map,\n reward_dict=self.rewards_by_location_id,\n already_visited_reward=self.already_visited_reward)\n # Looks for most optimal next step\n next_step = self.hill_climbing(local_problem, self.history)\n\n # If the next step is already in history the reward will be exponentially increased (but is negated)\n if next_step in self.history.keys():\n print \"You already visited that so will be punished!\"\n self.rewards_by_location_id[next_step] -= self.already_visited_reward ** self.history[next_step]\n self.history[next_step] += 1\n\n next_x, next_z = state_space_locations[next_step]\n self.agent_host.sendCommand(\"tp \" + str(next_x) + \" \" + str(217) + \" \" + str(next_z))\n time.sleep(0.1)\n\n # -- Collect the number of rewards and add to reward_cumulative --#\n # -- Note: Since we only observe the sensors and environment every a number of rewards may have accumulated in the buffer --#\n for reward_t in state_t.rewards:\n print(\"Reward_t:\", reward_t.getValue())\n self.solution_report.addReward(reward_t.getValue(), datetime.datetime.now())\n\n # -- Check if anything went wrong along the way --#\n for error in state_t.errors:\n print(\"Error:\", error.text)\n\n print(\"Mission has ended.either because time has passed or you reached the goal.\")\n return", "def solve(self):\n self.last_result = None\n\n # Check solve with start/next\n if self.context.solver.solve_with_search_next:\n return self.solve_with_search_next()\n\n # Notify listeners\n self._notify_listeners_start_operation(listener.OPERATION_SOLVE)\n\n # Solve model\n self._check_status(STATUS_IDLE)\n self._set_status(STATUS_SOLVING)\n stime = time.time()\n try:\n msol = self.agent.solve()\n except Exception as e:\n # Check if aborted in the mean time\n if self._check_status_aborted():\n return self.last_result\n if self.context.log_exceptions:\n traceback.print_exc()\n raise e\n self._set_status(STATUS_IDLE)\n stime = time.time() - stime\n self.context.solver.log(1, \"Model '\", self.model.get_name(), \"' solved in \", round(stime, 2), \" sec.\")\n msol.process_infos[CpoProcessInfos.SOLVE_TOTAL_TIME] = stime\n\n # Store last solution\n self.last_result = msol\n\n # Notify listeners\n for lstnr in self.listeners:\n lstnr.new_result(self, msol)\n self._notify_listeners_end_operation()\n\n # Return solution\n return msol" ]
[ "0.7180512", "0.6942709", "0.6700484", "0.662909", "0.662909", "0.6619703", "0.6546795", "0.63575697", "0.6320139", "0.6305795", "0.6301925", "0.6139513", "0.6138511", "0.608324", "0.6081203", "0.6034735", "0.6010343", "0.5882625", "0.5849981", "0.57675886", "0.57673275", "0.5757477", "0.5746916", "0.5746916", "0.5746916", "0.5733201", "0.57303125", "0.57287914", "0.56968606", "0.56803614", "0.5679954", "0.56718117", "0.56534547", "0.56534547", "0.5646806", "0.5646806", "0.5646806", "0.5627925", "0.5624859", "0.5614215", "0.56054014", "0.55999196", "0.5595345", "0.557982", "0.55726254", "0.5559154", "0.5531822", "0.5521642", "0.550857", "0.55081725", "0.54910845", "0.54909736", "0.5454062", "0.54523", "0.542878", "0.5428163", "0.5421851", "0.54185635", "0.53925264", "0.5390884", "0.5389152", "0.53865325", "0.53749734", "0.53741497", "0.5367182", "0.5363517", "0.5361255", "0.53591704", "0.5353833", "0.53409624", "0.5340865", "0.5332879", "0.5327909", "0.53224546", "0.5321303", "0.5315175", "0.53079027", "0.5305751", "0.52971536", "0.5291692", "0.5289162", "0.52882737", "0.5288221", "0.52850705", "0.52850705", "0.527878", "0.5269178", "0.5266971", "0.5263537", "0.5259116", "0.5256305", "0.5253826", "0.52202296", "0.52179813", "0.5217724", "0.5217265", "0.52135485", "0.52095985", "0.520506", "0.5204332", "0.5201977" ]
0.0
-1
Our goal is to get the edges split into high/low groups but we do not care what the final orientation of the edges is. Each edge can either be in its final orientation or not, so there are (2^12)/2 or 2048 possible permutations. The /2 is because there cannot be an odd number of edges not in their final orientation.
def eo_edges(self): logger.info("eo_edges called") permutations = [] original_state = self.state[:] original_solution = self.solution[:] tmp_solution_len = len(self.solution) # Build a list of the wing strings at each midge wing_strs = [] for _, square_index, partner_index in midges_recolor_tuples_555: square_value = self.state[square_index] partner_value = self.state[partner_index] wing_str = square_value + partner_value wing_str = wing_str_map[square_value + partner_value] wing_strs.append(wing_str) # build a list of all possible EO permutations...an even number of edges must be high for num in range(4096): num = str(bin(num)).lstrip("0b").zfill(12) if num.count("1") % 2 == 0: permutations.append(list(map(int, num))) # Put all 2048 starting states in a file and point ida-via-graph # at the file so it can solve all of them and apply the one that is the shortest. lr_center_stage_states = [] eo_outer_orbit_states = [] eo_inner_orbit_states = [] for permutation in permutations: must_be_uppercase = [] must_be_lowercase = [] self.state = original_state[:] for wing_str, uppercase in zip(wing_strs, permutation): if uppercase: must_be_uppercase.append(wing_str) else: must_be_lowercase.append(wing_str) # logger.info("%s: %s permutation %s" % (self, index, "".join(map(str, permutation)))) self.edges_flip_orientation(must_be_uppercase, must_be_lowercase) # build lists of the states that we need to find state_indexes for lr_center_stage_states.append(self.lt_phase3_lr_center_stage.state()) eo_outer_orbit_states.append(self.lt_phase3_eo_outer_orbit.state()) eo_inner_orbit_states.append(self.lt_phase3_eo_inner_orbit.state()) # now we have a huge list of states to lookup, do a binary search on multiple states at once (this is drastically faster # than binary searching for them individually). state_index_multiple() will return a dict where the state is the key # and the state_index is the value. lr_center_stage_eo_inner_orbit_state_indexes = self.lt_phase3_lr_center_stage.state_index_multiple( lr_center_stage_states ) eo_outer_orbit_state_indexes = self.lt_phase3_eo_outer_orbit.state_index_multiple(eo_outer_orbit_states) eo_inner_orbit_state_indexes = self.lt_phase3_eo_inner_orbit.state_index_multiple(eo_inner_orbit_states) # build a list of tuples of the state indexes pt_state_indexes = [] for lr_center_stage_eo_inner_orbit_state, eo_outer_orbit_state, eo_inner_orbit_state in zip( lr_center_stage_states, eo_outer_orbit_states, eo_inner_orbit_states ): pt_state_indexes.append( ( lr_center_stage_eo_inner_orbit_state_indexes[lr_center_stage_eo_inner_orbit_state], eo_outer_orbit_state_indexes[eo_outer_orbit_state], eo_inner_orbit_state_indexes[eo_inner_orbit_state], ) ) self.state = original_state[:] self.solution = original_solution[:] # When solve_via_c is passed pt_state_indexes (2048 lines of states in this case), it will try all 2048 of them # to find the state that has the shortest solution. self.lt_phase3.solve_via_c(pt_states=pt_state_indexes) self.print_cube_add_comment("edges EOed into high/low groups", tmp_solution_len) self.post_eo_state = self.state[:] self.post_eo_solution = self.solution[:] # re-color the cube so that the edges are oriented correctly so we can # pair 4-edges then 8-edges. After all edge pairing is done we will uncolor # the cube and re-apply the solution. self.edges_flip_orientation(wing_strs, []) self.highlow_edges_print()
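The parity claim in the docstring above ((2^12)/2 = 2048 patterns with an even number of flipped edges) can be checked independently of the cube lookup tables. The following is a minimal standalone sketch, not part of the original solver, that reproduces the same even-parity permutation filter used inside eo_edges.

# Minimal sketch (assumption: standalone illustration, not the solver's own helper).
# It enumerates all 4096 possible 12-bit edge-orientation patterns and keeps only
# those with an even number of "high" edges, mirroring the filter in eo_edges.
def even_parity_permutations(num_edges=12):
    permutations = []
    for num in range(2 ** num_edges):
        bits = format(num, "0{}b".format(num_edges))  # zero-padded binary string
        if bits.count("1") % 2 == 0:  # even number of flipped (high) edges
            permutations.append(list(map(int, bits)))
    return permutations

# Exactly half of the 4096 patterns survive, matching (2**12) / 2 == 2048.
assert len(even_parity_permutations()) == 2048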
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subdivide_ngons(faces: Iterable[Sequence[Union[Vector, Vec2]]]) -> Iterable[\n List[Vector]]:\n for face in faces:\n if len(face) < 5:\n yield face\n else:\n mid_pos = sum(face) / len(face)\n for index, vertex in enumerate(face):\n yield face[index - 1], vertex, mid_pos", "def split_edges(self, maximum_distance):\n \"\"\" Iterate through the vertices of each section. For each vertex v, evaluate edges for which v is a source.\n If an edge of weight greater than maximum_distance, then split it. \"\"\"\n for section_id in self.sections:\n utils.print_progress(len(self.sections), prefix='splitting edges')\n current_section = [] # Need to update the section data after splitting the edges.\n for source in self.sections[section_id]:\n current_section.append(source)\n edges_to_remove = [] # If an edge is split, it will need to be removed.\n for edge in self.graph.get_out_edges(source):\n if self.edge_weights[edge] > maximum_distance:\n target = edge[1] # edge is a numpy array of [source, target, edge]. Select target.\n edges_to_remove.append(self.graph.edge(edge[0], edge[\n 1])) # If an edge is split, the original edge should be removed.\n\n new_edge_count = int(math.ceil(self.edge_weights[edge] / maximum_distance))\n new_edge_distance = self.edge_weights[edge] / new_edge_count\n current_point = shapes.Point.from_list(\n list(self.node_locations[source]) + [self.node_heading[target]])\n previous_vertex = source\n for _ in range(new_edge_count):\n current_point = utils.offset_point(current_point, new_edge_distance, current_point.bearing)\n current_vertex = self.graph.add_vertex()\n current_section.append(current_vertex) # The new vertex becomes a part of the section.\n \"\"\" Populate the property map for the new vertex. Inherit values from the target node,\n unless the target node is a junction node. Then inherit values from the source. \"\"\"\n self.node_locations[current_vertex] = current_point.as_list()\n self.node_heading[current_vertex] = current_point.bearing\n property_vertex = source if not self.junctions[target] else target\n self.node_speed_limit[current_vertex] = self.node_speed_limit[property_vertex]\n self.node_width[current_vertex] = self.node_width[property_vertex]\n self.node_id[current_vertex] = self.node_id[property_vertex]\n\n \"\"\" Create an edge between the previous vertex and the newly created vertex, \n and update the edge weight property map. \"\"\"\n current_edge = self.graph.add_edge(previous_vertex, current_vertex)\n self.edge_weights[current_edge] = new_edge_distance\n\n # The current vertex becomes the previous vertex in the next step.\n previous_vertex = current_vertex\n\n \"\"\" Create an edge between the last new vertex that was created and the target of the\n original edge which is being split, and update the property map. 
\"\"\"\n self.edge_weights[self.graph.add_edge(previous_vertex, target)] = new_edge_distance\n list(map(self.graph.remove_edge, edges_to_remove)) # Remove all relevant edges\n self.sections[section_id] = current_section # Update the section with the new vertices", "def odd_decomposition(G) -> Tuple[Set[int], Set[int]]:\n multi = isinstance(G, nx.MultiGraph)\n\n # create a MultiGraph copy of G\n G = nx.MultiGraph(G)\n\n # remove isolated nodes from the graph, since they are irrelevant and we are working on a copy\n G.remove_nodes_from(list(nx.isolates(G)))\n\n # if the base graph is already odd\n if is_odd(G):\n return (set(G.edges(keys=multi)), set())\n\n odd_subgraph = G.subgraph(odd_nodes(G))\n even_subgraph = G.subgraph(even_nodes(G))\n\n odd_components = nx.connected_components(odd_subgraph)\n even_components = nx.connected_components(even_subgraph)\n\n # use the same notation as in our source paper\n X = list(odd_components)\n Y, Z = partition(even_components, lambda x: len(x) % 2 == 0)\n\n lX, lY, lZ = len(X), len(Y), len(Z)\n linear_system = Matrix(lY + lZ, lX + 1, bin_field)\n\n # create a linear system over GF(2) as described in our source paper\n for i, Yi in enumerate(Y):\n for j, Xi in enumerate(X):\n if n_joining(G, Xi, Yi) % 2 == 1:\n linear_system.set(i, j, 1)\n else:\n linear_system.set(i, j, 0)\n linear_system.set(i, lX, 1)\n\n for i, Zi in enumerate(Z):\n for j, Xi in enumerate(X):\n if n_joining(G, Xi, Zi) % 2 == 1:\n linear_system.set(i + lY, j, 1)\n else:\n linear_system.set(i + lY, j, 0)\n linear_system.set(i + lY, lX, 0)\n\n # transform the system into RREF\n linear_system.reduced_row_echelon_form()\n red = set()\n\n # we only need one solution\n # every non pivot is set to 0 (meaning blue)\n # while every pivot matches the augmented value\n # only create the red set since this is the only one we need\n\n for i in range(lY + lZ):\n val = linear_system.get(i, lX)\n # don't cross the last column (augmented part)\n for j in range(lX):\n v = linear_system.get(i, j)\n if v == 1:\n if val == 1:\n red.add(j)\n break\n else:\n if val == 1:\n # if any of the lines has no pivot and 1 as the augmented value this system is not solvable\n raise NotDecomposableError(\"The graph is not decomposable due to unsolvable system\")\n else:\n # this is a zero row ane there are no pivots after this row\n break\n\n red_nodes = set.union(*[X[i] for i in red]) if red else set()\n # edges adjacent to red nodes\n red_edges = set(G.edges(nbunch=red_nodes, keys=True))\n # calculate the red degree for every even node\n degrees = dict()\n for i in even_subgraph.nodes():\n degrees[i] = 0\n\n for i, j, k in red_edges:\n if i in degrees:\n degrees[i] += 1\n if j in degrees:\n degrees[j] += 1\n\n # T set is the set of all nodes of even red degree\n T = {i for i, deg in degrees.items() if deg % 2 == 0}\n\n # compute the T-join and mark the nodes as red/blue\n red_join = T_join(even_subgraph, T)\n\n red_edges.update(red_join)\n blue_edges = set(G.edges(keys=True)).difference(red_edges)\n\n if not multi:\n red_edges = {(i, j) for i, j, k in red_edges}\n blue_edges = {(i, j) for i, j, k in blue_edges}\n\n return (red_edges, blue_edges)", "def partition_girvan_newman(graph, max_depth):\n ###TODO\n pass", "def generateArray(nEdge):\n oArray = np.zeros([nEdge**2, nEdge**2])\n \n for i in range(nEdge**2):\n for j in range(nEdge**2):\n if j == i+nEdge or j == i-nEdge or j == i+1 and i % nEdge != nEdge-1 or j == i-1 and i % nEdge != 0: # sweet...\n oArray[i, j] = 1\n \n return oArray", "def 
test_n_group_split(self):\n # Test 2 groups like HalfSplitter first\n hs = NGroupPartitioner(2)\n\n for isreversed, splitter in enumerate((hs, hs)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in hs.generate(self.data) ]\n self.assertTrue(len(splits) == 2)\n\n for i, p in enumerate(splits):\n self.assertTrue( len(p) == 2 )\n self.assertTrue( p[0].nsamples == 50 )\n self.assertTrue( p[1].nsamples == 50 )\n\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n\n # check if it works on pure odd and even chunk ids\n moresplits = [ list(spl.generate(p)) for p in hs.generate(splits[0][0])]\n\n for split in moresplits:\n self.assertTrue(split[0] != None)\n self.assertTrue(split[1] != None)\n\n # now test more groups\n s5 = NGroupPartitioner(5)\n\n # get the splits\n for isreversed, s5splitter in enumerate((s5, s5)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in s5splitter.generate(self.data) ]\n\n # must have 10 splits\n self.assertTrue(len(splits) == 5)\n\n # check split content\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [2, 3, 4, 5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [2, 3])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 4, 5, 6, 7, 8, 9])\n # ...\n assert_array_equal(splits[4][1-isreversed].sa['chunks'].unique,\n [8, 9])\n assert_array_equal(splits[4][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4, 5, 6, 7])\n\n\n # Test for too many groups\n def splitcall(spl, dat):\n return list(spl.generate(dat))\n s20 = NGroupPartitioner(20)\n self.assertRaises(ValueError,splitcall,s20,self.data)", "def graph_generation(nodes: np.ndarray, edges: np.ndarray):\n result_nodes = []\n result_edges = []\n\n last_index = nodes[-1, 0]\n\n done = False\n # print(nodes)\n for p in range(nodes.shape[0]):\n for q in range(nodes.shape[0]):\n if list(nodes[p, 1:-2]) == (list(nodes[q, 1:-2])) and nodes[p, nodes.shape[1] - 2] < \\\n nodes[q, nodes.shape[1] - 2]:\n tmp_node = np.append(nodes[p, 1:], np.append(nodes[q, nodes.shape[1] - 2],\n np.append(nodes[q, nodes.shape[1] - 1],\n np.append(nodes[p, 0],\n nodes[q, 0]))))\n if not done:\n result_nodes = [tmp_node]\n done = True\n else:\n result_nodes = np.concatenate((result_nodes, [tmp_node]))\n\n result_nodes = result_nodes[np.argsort(\n result_nodes[:, [e for e in range(1, result_nodes.shape[1] - 2) if e % 2 != 0]].sum(axis=1)), :]\n\n result_nodes = np.c_[range(last_index + 1, last_index + 1 + result_nodes.shape[0]), result_nodes]\n # print(result_nodes)\n\n done = False\n for e in range(edges.shape[0]):\n for f in range(edges.shape[0]):\n for p in range(result_nodes.shape[0]):\n for q in range(result_nodes.shape[0]):\n if (edges[e, 0] == result_nodes[p, -2] and edges[e, 1] == result_nodes[q, -2] and edges[\n f, 0] == result_nodes[p, -1] and edges[f, 1] == result_nodes[q, -1]) \\\n or (edges[e, 0] == result_nodes[p, -2] and edges[e, 1] == result_nodes[q, -2] and\n result_nodes[p, -1] == result_nodes[q, -1]) 
\\\n or (edges[e, 0] == result_nodes[p, -1] and edges[e, 1] == result_nodes[q, -1] and\n result_nodes[p, -2] == result_nodes[q, -2]):\n if not done:\n result_edges = [[result_nodes[p, 0], result_nodes[q, 0]]]\n done = True\n else:\n result_edges = np.concatenate(\n (result_edges, [[result_nodes[p, 0], result_nodes[q, 0]]]), axis=0)\n # print(edges)\n\n # print(result_edges)\n unique_result_edges = list(Counter(str(e) for e in result_edges).keys())\n # print(unique_result_edges)\n final_edges = []\n for k in range(len(unique_result_edges)):\n for j in range(result_edges.shape[0]):\n if str(result_edges[j]) == unique_result_edges[k]:\n if k == 0:\n final_edges = result_edges[j]\n break\n else:\n final_edges = np.concatenate((final_edges, result_edges[j]))\n break\n final_edges = np.reshape(final_edges, (int(final_edges.shape[0] / 2), 2))\n # print(final_edges.shape[0])\n done = False\n edge_to_remove = []\n for j in range(final_edges.shape[0]):\n for k in range(j + 1, final_edges.shape[0]):\n if final_edges[j, 1] == final_edges[k, 0]:\n if not done:\n edge_to_remove = [[final_edges[j, 0], final_edges[k, 1]]]\n done = True\n else:\n edge_to_remove = np.concatenate((edge_to_remove, [[final_edges[j, 0], final_edges[k, 1]]]))\n # print(edge_to_remove)\n\n idx_to_remove = []\n done = False\n for j in range(edge_to_remove.shape[0]):\n for k in range(final_edges.shape[0]):\n if list(edge_to_remove[j]) == list(final_edges[k]):\n if not done:\n idx_to_remove = k\n done = True\n else:\n idx_to_remove = np.append(idx_to_remove, k)\n final_edges = np.delete(final_edges, idx_to_remove, axis=0)\n # print(final_edges)\n result_nodes = np.delete(result_nodes, [-1, -2], 1)\n # print(result_nodes)\n return result_nodes, final_edges", "def optimal_2split(data, dicrete=True, equidistant=True, verbose=False):\n if dicrete and equidistant:\n uniqs = np.unique(data)\n uniqs = np.sort(uniqs)\n delta_ = (uniqs[1:] - uniqs[:-1]).min()\n rho_crit = 10\n n0 = data.shape[0]\n l = uniqs.max() - uniqs.min()\n rho_cur = n0 / (l / delta_)\n delta = np.ceil(rho_crit / rho_cur) * delta_\n bbs = np.arange(\n uniqs.min() - 0.5 * delta, uniqs.max() + 0.5 * delta + 1e-6 * delta, delta\n )\n # n_actual = (bbs[-1] - bbs[0])/delta\n if verbose:\n print(bbs)\n else:\n bbs = 10\n cnts, bbs = histogram(data, bbs)\n diff = cnts[1:] - cnts[:-1]\n ddif = diff[1:] - diff[:-1]\n if verbose:\n print(cnts, bbs)\n print(list(zip(range(len(cnts)), cnts)), bbs)\n print(\"f prime:\", diff)\n print(\"f double prime:\", ddif)\n # either diff == 0, or\n derivative_change = diff[1:] * diff[:-1]\n # sign change indices\n ii = flatnonzero((derivative_change < 0) & (ddif > 0))\n jj = flatnonzero(diff == 0)\n concats = np.concatenate([ii, jj])\n if verbose:\n print(\"candidate indices\")\n print(1 + ii, 1 + jj)\n print(concats)\n print(\"candidate cnts\")\n print(cnts[1 + concats])\n if concats.size > 0:\n arg_glo_min = argmin(cnts[1 + concats])\n if arg_glo_min < len(ii):\n lbbs, rbbs = bbs[1 + concats[arg_glo_min]], bbs[2 + concats[arg_glo_min]]\n optimal_split = 0.5 * (lbbs + rbbs)\n else:\n # two conseq. 
equal values, optinal split is between them\n optimal_split = bbs[2 + concats[arg_glo_min]]\n if verbose:\n print(\n arg_glo_min,\n 1 + concats[arg_glo_min],\n cnts[1 + concats[arg_glo_min]],\n optimal_split,\n )\n else:\n return np.nan\n return optimal_split", "def GetBoundaryEdgesPent(self):\n\n p = self.InferPolynomialDegree()\n\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.edges,np.ndarray):\n if self.edges.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.edges.shape[1] == 2 and p > 1:\n pass\n else:\n return\n\n node_arranger = np.array([\n [0,1],\n [1,2],\n [2,3],\n [3,4],\n [4,0],\n ])\n\n # GET ALL EDGES FROM THE ELEMENT CONNECTIVITY\n all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],\n self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]],\n self.elements[:,node_arranger[4,:]]),axis=0).astype(np.uint64)\n\n # GET UNIQUE ROWS\n uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)\n\n # ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES\n freqs_inv = itemfreq(inv)\n edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]\n # NOT ARRANGED\n self.edges = uniques[edges_ext_flags,:]\n\n # DETERMINE WHICH FACE OF THE ELEMENT THEY ARE\n boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)\n\n # FURTHER RE-ARRANGEMENT / ARANGE THE NODES BASED ON THE ORDER THEY APPEAR\n # IN ELEMENT CONNECTIVITY\n # THIS STEP IS NOT NECESSARY INDEED - ITS JUST FOR RE-ARANGMENT OF EDGES\n all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)\n all_edges_in_edges = np.where(all_edges_in_edges==True)[0]\n\n boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]\n boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]\n\n # ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS\n self.edges = self.elements[boundary_edge_to_element[:,0][:,None],node_arranger[boundary_edge_to_element[:,1],:]]\n self.edges = self.edges.astype(np.uint64)\n self.boundary_edge_to_element = boundary_edge_to_element\n\n return self.edges", "def convert_edges_perm(edges):\n L = dict(edges)\n output = [START_NODE]\n while output[-1] != END_NODE:\n output.append(L[output[-1]])\n if len(edges) + 1 != len(output):\n raise Exception()\n return output", "def connected((e,r)):\n \n # Deal with the middle case so we don't divide by zero\n if r==0: return [(1,1),(2,1),(3,1),(4,1),(5,1),(0,1)]\n # If the input is impossible, return nothing to prune the branch (shouldn't\n # happen)\n if e>=6*r: return []\n connected=[]\n mult=e//r\n rem=e % r\n #Going sideways\n toAdd=((6*r-1,r) if e==0 else (e-1,r))\n connected.append(toAdd)\n toAdd=((0,r) if e==6*r-1 else (e+1,r))\n connected.append(toAdd)\n #Going inward\n toAdd=( (0,r-1)if mult==5 and rem==r-1 else (mult*(r-1)+rem,r-1) )\n connected.append(toAdd)\n if rem!=0:\n connected.append((mult*(r-1)+rem-1,r-1))\n\n #Going outward\n if r<nLayers-1:\n connected.append((mult*(r+1)+rem,r+1))\n connected.append((mult*(r+1)+rem+1,r+1))\n if rem==0: # only case where negatives could result\n if mult>0: connected.append( (mult*(r+1)-1,r+1))\n else: connected.append( (6*(r+1)-1,r+1))\n \n return connected", "def graphs(n):\n assert n >= 0\n\n # Special cases for small vertex sets\n if n <= 2:\n if n == 0:\n yield []\n return\n if n == 1:\n yield [ [] ]\n return\n if n == 2:\n yield [ [], [] ]\n yield [ [1], [0] ]\n return\n\n # Make generator 
yielding all possible edges.\n # If a < b < c, then we yield edge (a,b) before (a,c).\n # If b < c < a, then we yield edge (b,a) before (c,a).\n # As a result, we will construct graph representations having sorted\n # adjacency lists, which our graph representation requires.\n alledges = ( (j, i) for i in range(n) for j in range(i) )\n\n # Generate all graphs\n # We unroll the portion of the loop dealing with edges (0,1), (0,2)\n for edges in powerset(itertools.islice(alledges, 2, None)):\n # unrolling for edges (0,1) and (0,2)\n g = [ [] for v in range(n) ]\n for e in edges:\n g[e[0]].append(e[1])\n g[e[1]].append(e[0])\n yield g\n\n # Add edge (0,1)\n g2 = g[:]\n # We can't use .insert below, since we don't want to modify the\n # items in the list we have (shallowly!) copied.\n g2[0] = [1]+g2[0]\n g2[1] = [0]+g2[1]\n yield g2\n\n # Add edge (0,2)\n g3 = g[:]\n g3[0] = [2]+g3[0]\n g3[2] = [0]+g3[2]\n yield g3\n\n # Add edges (0,1) and (0,2)\n g4 = g3[:] # Not copied from g!\n g4[0] = [1]+g4[0]\n g4[1] = [0]+g4[1]\n yield g4", "def grid_edges(num_node):\n m = math.sqrt(num_node)\n top = []\n bottom = []\n left = []\n right = []\n for node_id in range(1, num_node + 1):\n if node_id % m == 1:\n left.append(node_id)\n elif node_id % m == 0:\n right.append(node_id)\n elif node_id <= m:\n top.append(node_id)\n elif node_id >= num_node - m + 1:\n bottom.append(node_id)\n else:\n pass\n return (top, bottom, left, right)", "def createBridgeSets(blocksize,operating,MPSS):\n sets = tuple()\n xul = blocksize[0]-operating\n xdl = operating\n yul = int(blocksize[0]/2+operating)\n ydl = int(blocksize[0]/2-operating)\n xts = xul\n xbs = xdl\n for i in range(MPSS):\n sets+=(tuple(product(numpy.arange(xdl,xul,1),numpy.arange(ydl,yul,1))),)\n xdl+=operating\n xul-=operating\n ydl-=operating\n yul+=operating\n return sets,sets[::-1]", "def createDownPyramidSets(blocksize,operating):\n bsx = int(blocksize[0]/2)\n bsy = int(blocksize[1]/2)\n dl = int((bsy)-operating); #lower y\n ul = int((bsy)+operating); #upper y\n sets = tuple()\n while dl > 0:\n r = numpy.arange(dl,ul,1)\n sets+=(tuple(product(r,r)),)\n dl-=operating\n ul+=operating\n return sets", "def create_llrs_combinations_to_edges(self):\n\n generator_polys = self.code_gm[0, :self.rate_inverse * (int(np.log2(self.n_states)) + 1)]\n generator_polys = generator_polys.reshape(int(np.log2(self.n_states)) + 1, -1).T\n generator_polys = np.fliplr(generator_polys)\n states_binary_combinations = np.array(\n list(itertools.product(range(2), repeat=int(np.log2(self.n_states))))).repeat(2, axis=0)\n input_bits = np.tile(np.array([1, 0]), self.n_states).reshape(-1, 1)\n\n binary_combinations = np.concatenate([input_bits, states_binary_combinations], axis=1)\n bits_outputs_on_edges = np.matmul(binary_combinations, generator_polys.T) % 2\n llr_outputs_on_edges = (-1) ** bits_outputs_on_edges\n llrs_combinations_to_edges_mat = np.zeros([2 ** self.rate_inverse, 2 * self.n_states])\n\n for row_ind in range(llrs_combinations_to_edges_mat.shape[0]):\n llrs_combinations_to_edges_mat[row_ind] = np.equal(llr_outputs_on_edges,\n self.all_llrs_combinations_mat[row_ind]).all(1)\n\n self.llrs_combinations_to_edges = torch.Tensor(llrs_combinations_to_edges_mat)", "def _mout_edges(nodes):\n n = nodes.shape[0]\n edges = []\n for i in range(0, n - 1):\n for j in range(i, n):\n if abs(nodes[i, 0] - nodes[j, 0]) > 1:\n break\n elif abs(nodes[i, 0] - nodes[j, 0]) == 1 and \\\n abs(nodes[i, 1] - nodes[j, 1]) == 0:\n edges.append([i, j])\n elif abs(nodes[i, 1] - nodes[j, 1]) == 1:\n 
edges.append([i, j])\n return edges", "def get_edges(self):\n for i in self.gens:\n if self.active[i]:\n elist = set()\n H = (i,) # edge-stabilizing subgroup\n reps = set(self.word_generator(parabolic=H))\n reps = self.G.sort_words(reps)\n for word in reps:\n v1 = self.G.move(self.vtable, 0, word)\n v2 = self.G.move(self.vtable, 0, word + (i,))\n if v1 is not None and v2 is not None:\n if v1 > v2:\n v1, v2 = v2, v1\n if (v1, v2) not in elist:\n elist.add((v1, v2))\n\n self.edge_indices[i] = elist\n\n self.num_edges = sum(len(L) for L in self.edge_indices.values())", "def generate_edges(self):\n for i, n in enumerate(self.points):\n for i1, p in enumerate(self.points[i+1:]):\n d = 0\n differences = 0\n for x in range(3):\n d += math.fabs(n.xyz[x] - p.xyz[x])\n if math.fabs(n.xyz[x] - p.xyz[x]) != 0:\n differences += 1\n\n if differences > 1:\n continue\n\n if d == -1 * self.side_length or d == self.side_length:\n self.edges.append([i, i1 + i + 1])", "def algorithm_h(n, m):\n partition = [1]*m\n partition[0] = n - m + 1\n\n while True:\n yield partition[:]\n if partition[1] < partition[0] - 1:\n partition[0] -= 1\n partition[1] += 1\n else:\n j = 2\n s = partition[0] + partition[1] - 1\n while j < m and partition[j] >= partition[0] - 1:\n s += partition[j]\n j += 1\n if j >= m:\n return\n replacement = partition[j] + 1\n partition[j] = replacement\n j -= 1\n while j > 0:\n partition[j] = replacement\n s -= replacement\n j -= 1\n partition[0] = s", "def createUpPyramidSets(blocksize,operating):\n sets = tuple()\n ul = blocksize[0]-operating\n dl = operating\n while ul > dl:\n r = numpy.arange(dl,ul,1)\n sets+=(tuple(product(r,r)),)\n dl+=operating\n ul-=operating\n return sets", "def count_automorphisms(g: Graph) -> int:\n\n def generate_mapping(g: Graph, h: Graph):\n \"\"\"\n Generates the corresponding mapping from vertex to vertex for the isomorphism between graphs g and h.\n We map g to h.\n :param g: A graph\n :param h: A graph\n :return: A permutation with the mapping from g to h\n \"\"\"\n mapping = [0] * len(g.vertices)\n for v_g in g:\n for v_h in h:\n if v_g.colornum == v_h.colornum:\n mapping[v_g.label] = v_h.label\n return permutation(len(mapping), mapping=mapping)\n\n def generate_automorphisms(g: Graph, h: Graph, d: list[Vertex], i: list[Vertex]):\n \"\"\"\n Is called recursively to traverse through the branching tree and to find all automorphisms.\n :param g: A copy of the original graph\n :param h: Another copy of the original graph\n :param d: A list with pre-colored vertices for graph g\n :param i: A list with pre-colored vertices for graph h\n \"\"\"\n\n # Refine the graphs g and h.\n color_refinement([g, h])\n\n # Make sure that the colors are balanced, and check for a bijection.\n if not is_balanced(g, h):\n return\n if is_bijection(g, h):\n\n # Generate the mapping from g -> h.\n p = generate_mapping(g, h)\n\n # If the permutation cannot be generated by this generating set, we need to add it.\n if not is_member(generating_set, p):\n generating_set.append(p)\n\n # We can now back to the last trivial ancestor nodes in the branching tree.\n while [v.label for v in d] != [v.label for v in i]:\n # We remove the vertices from d and i and mark them as 'used'.\n # This should prevent the algorithm from trying to re-explore a branch that may be skipped.\n # FIXME: This strategy seems too aggressive, the results are sometimes off by a factor 2 or 4\n d.pop().pre_labeled = True\n i.pop().pre_labeled = True\n\n return\n\n c, next_color = get_c([g, h])\n for v_g in g:\n if 
v_g.colornum == c:# and not v_g.pre_labeled:\n x = v_g\n break\n\n for v_h in h:\n if v_h.colornum == c and not v_h.pre_labeled:\n g1 = g + Graph(False)\n h1 = h + Graph(False)\n g1.vertices[g.vertices.index(x)].colornum = next_color\n h1.vertices[h.vertices.index(v_h)].colornum = next_color\n d.append(x)\n i.append(v_h)\n generate_automorphisms(g1, h1, d, i)\n\n generating_set = []\n graph_copy_1 = g + Graph(False)\n graph_copy_2 = g + Graph(False)\n for v in graph_copy_1.vertices:\n v.pre_labeled = False\n for v in graph_copy_2.vertices:\n v.pre_labeled = False\n generate_automorphisms(graph_copy_1, graph_copy_2, [], [])\n return compute_order(generating_set)", "def make_partioned_regions(shape, alpha=1.0, max_regions=5, min_regions=2):\n ring = np.array([[1,1,1],[1,0,1],[1,1,1]], dtype=np.int16)\n adjacent = np.array([ # Diagonals don't count as adjacent\n [-1,0,0,1],\n [0,-1,1,0]], dtype=np.int16).T\n nearby = np.meshgrid([-2,-1,0,1,2], [-2,-1,0,1,2])\n\n board = np.zeros(shape, dtype=np.int16)\n perimeters = [{\n (i, j) for i, j in zip(*np.nonzero(board == 0))\n }]\n exclusions = [set()]\n while sum(len(p) for p in perimeters) > 0:\n weights = np.array([len(p) for p in perimeters], dtype=float)\n weights[0] = min(alpha, weights[0]) if len(weights) <= max_regions else 1e-10\n if len(weights) <= min_regions:\n weights[1:] = 1e-10\n weights /= np.sum(weights)\n k = get_rng().choice(len(perimeters), p=weights)\n plist = list(perimeters[k])\n i, j = plist[get_rng().choice(len(plist))]\n perimeters[0].discard((i, j))\n perimeters[k].discard((i, j))\n if (i, j) in exclusions[k]:\n continue\n exclusions[0].add((i,j))\n exclusions[k].add((i,j))\n b = board[(i+nearby[0]) % shape[0], (j+nearby[1]) % shape[1]]\n b[2,2] = k or -1\n num_neighbors = signal.convolve2d(b != 0, ring, mode='valid')\n num_foreign = signal.convolve2d((b > 0) & (b != k), ring, mode='valid')\n if ((num_foreign > 0) & (num_neighbors > 2)).any() or num_foreign[1,1] > 0:\n continue\n # Add to the board\n if k == 0:\n k = len(perimeters)\n perimeters.append(set())\n exclusions.append(set())\n board[i, j] = k\n for i2, j2 in (adjacent + (i, j)) % shape:\n if board[i2, j2] == 0:\n perimeters[k].add((i2, j2))\n return board", "def num_43():\n \n def block(a, r=3, cs=3, row_order=True):\n \"\"\"Block slice an array using a window of (rs, cs) size\n \"\"\"\n lenr = a.shape[0]//rs\n lenc = a.shape[1]//cs\n if row_order:\n iter = [(i, j) for (i, j) in np.ndindex(lenr, lenc)]\n else:\n iter = [(j, i) for (i, j) in np.ndindex(lenr, lenc)]\n b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] for (i,j) in iter])\n #b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (i, j) in np.ndindex(lenr, lenc)])\n return b\n r = 6\n c = 6\n a = np.arange(r*c).reshape(r, c)\n vs = np.array(np.vsplit(a, 2))\n hs = np.array(np.hsplit(a, 2))\n #a.squeeze(axis=(2,3))\n rs = 3\n cs = 4\n #lenr = a.shape[0]//rs\n #lenc = a.shape[1]//cs\n #b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (i, j) in np.ndindex(lenr, lenc)])\n #b1 = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (j, i) in np.ndindex(lenr, lenc)])\n e = block(a, 3, 4, row_first=False)\n b = block(a, rs, cs, True)\n b1 = block(a, rs, cs, False)\n c = np.array([np.vsplit(i, 2) for i in np.hsplit(a, 2)])\n d = np.array([np.hsplit(i, 2) for i in np.vsplit(a, 2)])\n #c = c.reshape(lenr*lenc, rs, cs) \n return a, b, b1, c, d, e", "def get_all_potential_edges(self) -> Dict[str,\n Tuple[int, int, int, int]]:\n orig_rows = self.tile_rows\n\n ret = dict()\n\n for i in range(0, 4):\n 
self.rotate_right(i)\n for j in range(0, 2):\n self.flip_l_r(j)\n for k in range(0, 2):\n self.flip_t_b(k)\n edges = self.get_current_edges()\n if edges not in ret.values():\n ret[f'rr{i}_lr{j}_tb{k}'] = edges\n\n self.tile_rows = orig_rows\n\n for j in range(0, 2):\n self.flip_l_r(j)\n for i in range(0, 4):\n self.rotate_right(i)\n for k in range(0, 2):\n self.flip_t_b(k)\n edges = self.get_current_edges()\n if edges not in ret.values():\n ret[f'lr{j}_rr{i}_tb{k}'] = edges\n\n self.tile_rows = orig_rows\n\n for j in range(0, 2):\n self.flip_l_r(j)\n for k in range(0, 2):\n self.flip_t_b(k)\n for i in range(0, 4):\n self.rotate_right(i)\n edges = self.get_current_edges()\n if edges not in ret.values():\n ret[f'lr{j}_tb{k}_rr{i}'] = edges\n\n self.tile_rows = orig_rows\n\n for k in range(0, 2):\n self.flip_t_b(k)\n for j in range(0, 2):\n self.flip_l_r(j)\n for i in range(0, 4):\n self.rotate_right(i)\n edges = self.get_current_edges()\n if edges not in ret.values():\n ret[f'tb{k}_lr{j}_rr{i}'] = edges\n\n self.tile_rows = orig_rows\n\n for k in range(0, 2):\n self.flip_t_b(k)\n for i in range(0, 4):\n self.rotate_right(i)\n for j in range(0, 2):\n self.flip_l_r(j)\n edges = self.get_current_edges()\n if edges not in ret.values():\n ret[f'tb{k}_rr{i}_lr{j}'] = edges\n\n self.tile_rows = orig_rows\n\n for i in range(0, 4):\n self.rotate_right(i)\n for k in range(0, 2):\n self.flip_t_b(k)\n for j in range(0, 2):\n self.flip_l_r(j)\n edges = self.get_current_edges()\n if edges not in ret.values():\n ret[f'rr{i}_tb{k}_lr{j}'] = edges\n\n self.tile_rows = orig_rows\n\n return ret", "def bipartition_by_edge(self, e):\n\n t = self._tree\n nr = e.head_node\n assert e.tail_node is not None\n assert e.head_node is not None\n assert nr.parent_node is e.tail_node\n is_valid_tree(t)\n\n potentially_deleted_nd = e.tail_node\n grandparent_nd = potentially_deleted_nd.parent_node\n e.tail_node.remove_child(nr, suppress_unifurcations=True)\n\n nr.edge.length = None\n nr.parent_node = None\n convert_node_to_root_polytomy(nr)\n t1 = PhylogeneticTree(Tree(seed_node=nr))\n # temp we could speed this up,\n # by telling the Phylogenetic tree how many leaves it has\n n1 = t1.n_leaves\n\n if hasattr(e, \"num_leaves_below\"):\n if grandparent_nd is None:\n old_root = potentially_deleted_nd\n if old_root.edge:\n old_root.edge.num_leaves_below -= n1\n else:\n if potentially_deleted_nd in grandparent_nd.child_nodes():\n potentially_deleted_nd.edge.num_leaves_below -= n1\n old_root = grandparent_nd\n if old_root.edge:\n old_root.edge.num_leaves_below -= n1\n while old_root.parent_node:\n old_root = old_root.parent_node\n if old_root.edge:\n old_root.edge.num_leaves_below -= n1\n else:\n old_root = grandparent_nd or potentially_deleted_nd\n while old_root.parent_node:\n old_root = old_root.parent_node\n\n t2 = PhylogeneticTree(Tree(seed_node=old_root))\n\n is_valid_tree(t1._tree)\n is_valid_tree(t2._tree)\n return t1, t2", "def hmn2(a, s, m0):\n n = 2*m0**s\n links = np.zeros((int(a/m0*n*sum([1/2**x for x in range(1, s+1)])), 2), dtype=np.int32)\n links_i = 0\n p = 0\n \n # At each hierarchy level a number of a links are established,\n # repeating the process if links are repeated.\n for si in range(1, s+1):\n m0_si = m0**si\n for n in range(0, n+1-2*m0_si, 2*m0_si):\n \n if a == 1:\n i = np.random.randint(0 + n, m0_si + n)\n j = np.random.randint(m0_si + n, 2*m0_si + n)\n links[p] = np.array([i, j])\n p += 1\n \n else:\n while len(np.unique(links[links_i:a + links_i], axis=0)) != a:\n for m in range(a):\n i = 
np.random.randint(0 + n, m0_si + n)\n j = np.random.randint(m0_si + n, 2*m0_si + n)\n links[links_i:a + links_i][m] = np.array([i, j])\n links_i += a\n \n blocks = np.arange(n).reshape((int(n/m0), m0))\n return np.concatenate((blocks, links))", "def additional_edges(cluster_bounds, insertion_factor, optimal_costs):\n n = cluster_bounds[len(cluster_bounds)-1]\n vertexwise_ins_edges = np.zeros(n, dtype=np.int64)\n vertexwise_max_edges = np.zeros(n, dtype=np.int64)\n\n for i in range(0,n):\n vertexwise_max_edges[i] = max_edges_in(i, cluster_bounds, insertion_factor)\n\n for v1 in rand.permutation(np.arange(0,n)):\n lower = get_cluster_bounds(v1, cluster_bounds)[0]\n upper = get_cluster_bounds(v1, cluster_bounds)[1]\n v2_arr = np.zeros((lower + (n-upper)), dtype= np.int64)\n k = 0\n for j in range(0, len(v2_arr)):\n if j < lower:\n v2_arr[j] = j\n else:\n v2_arr[j] = upper + k\n k += 1\n for v2_i in rand.permutation(v2_arr.shape[0]):\n v2 = v2_arr[v2_i]\n if (vertexwise_ins_edges[v1] + 1) > vertexwise_max_edges[v1]:\n break\n if (vertexwise_ins_edges[v2] + 1) <= vertexwise_max_edges[v2]:\n print(v1, v2, 1)\n vertexwise_ins_edges[v1] += 1\n vertexwise_ins_edges[v2] += 1\n optimal_costs[0] += 1", "def dp_partition(edges, to_add=[], to_remove=[]):\n if not edges:\n return to_add, [edge_id for edge_id in to_remove if edge_id is not None]\n\n \"\"\" Take the minimum of two results:\n - merge the first two edges, and consider all remaining edges\n - do not merge the first edge, and consider all remaining edges. \"\"\"\n\n \"\"\" Possibility 1: Do not merge the first two edges. \n Result: Partition on all of the remaining edges. Add the current edge to to_add, \n and the current edge to to_remove. \"\"\"\n skip_edge = dp_partition(edges[1:], to_add + [edges[0]], to_remove + [edges[0][2]])\n\n \"\"\" Possibility 2: Merge the first two edges. \n Result: Partition the newly merged edge with all of the remaining edges, we add \n nothing to to_add because the merged edge may be merged again, \n and we remove the two edges which were merged. \"\"\"\n try:\n merge_edge = dp_partition([merge(edges[0], edges[1])] + edges[2:], to_add,\n to_remove + [edges[0][2]] + [edges[1][2]])\n except (AssertionError, IndexError) as exception:\n \"\"\" Either the first two edges in the pool cannot be merged, or there is only one edge remaining\n in the pool. In both cases, partition without merging. \"\"\"\n merge_edge = skip_edge\n\n \"\"\" Return the result which adds the fewest edges. 
\"\"\"\n return min(merge_edge, skip_edge, key=lambda pair: len(pair[0]))", "def get_eulerian_graph_edges(bbox, source):\n osm_graph = OSMGraph(bbox, source)\n # input all nodes and get odd nodes, update node attributes\n odd_nodes = get_odd_nodes(osm_graph.nodes_dict)\n\n # initialize all_pairs_list\n all_pairs_list = []\n\n # if there are 6 or fewer odd nodes look for all possible options,\n # otherwise look for just three basic pairing options\n\n if len(odd_nodes) <= 10:\n print(\"ROBUST PAIRING FUNCTION\")\n all_pairs_list = get_list_of_all_pairs_lists(odd_nodes)\n\n else:\n print(\"CHEAP PAIRING FUNCTION\")\n all_pairs_list = get_list_of_all_pairs_lists_short(odd_nodes)\n\n for item in all_pairs_list:\n print(\"\\n\\nPair option:\", item)\n print(\"Pair option len:\", len(item))\n\n dict_pairings_lists_lengths = get_dict_pairings_lists_lengths(\n all_pairs_list, osm_graph\n )\n twice_traversals_edges = get_twice_traversals_edges(dict_pairings_lists_lengths)\n updated_graph_instance = update_twice_traversal_edges(\n twice_traversals_edges, osm_graph\n )\n return updated_graph_instance", "def cantor() -> bigger.MCG[Edge]: # pylint: disable=too-many-statements\n\n POS, EQ, NEG = +1, 0, -1\n\n def edges() -> Iterable[Edge]:\n for x in naturals():\n for y in [POS, EQ, NEG]:\n yield x, y\n\n def negate(X: Edge) -> Edge:\n return X[0], -X[1]\n\n def invert(sign: int, X: tuple[Edge, bool, Edge, bool, Edge, bool, Edge, bool]) -> tuple[Edge, bool, Edge, bool, Edge, bool, Edge, bool]:\n return X if sign == POS else (negate(X[6]), not X[7], negate(X[4]), not X[5], negate(X[2]), not X[3], negate(X[0]), not X[1])\n\n def link(edge: Edge) -> tuple[Edge, bool, Edge, bool, Edge, bool, Edge, bool]:\n n, k = edge\n if k == EQ: # Equator\n if n == 0:\n return ((0, NEG), False, (1, NEG), True, (1, POS), False, (0, POS), True)\n elif n == 1:\n return ((2, POS), False, (0, POS), False, (0, NEG), True, (2, NEG), True)\n else: # n > 1\n return ((3 * n - 3, NEG), False, (3 * n - 1, NEG), True, (3 * n - 1, POS), False, (3 * n - 3, POS), True)\n\n # Northern / Southern hemisphere.\n if n == 0:\n return invert(k, ((0, EQ), False, (1, POS), False, (1, EQ), True, (2, POS), False))\n elif n == 1:\n return invert(k, ((4, POS), False, (3, POS), False, (0, POS), True, (0, EQ), False))\n elif n == 2:\n return invert(k, ((7, POS), False, (6, POS), False, (0, POS), False, (1, EQ), True))\n N, r = n // 3 + 1, n % 3\n incoming = 3 * (N // 2) - (1 if N % 2 else 2)\n if r == 0:\n return invert(k, ((N, EQ), False, (n + 2, POS), False, (incoming, POS), True, (n + 1, POS), False))\n elif r == 1:\n return invert(k, ((6 * N - 2, POS), False, (6 * N - 3, POS), False, (n - 1, POS), False, (incoming, POS), True))\n else: # r == 2:\n return invert(k, ((6 * N + 1, POS), False, (6 * N + 0, POS), False, (n - 2, POS), True, (N, EQ), False))\n\n T = bigger.Triangulation.from_pos(edges, link)\n\n def generator(name: str) -> bigger.Encoding[Edge]: # pylint: disable=too-many-branches\n twist_match = re.match(r\"(?P<curve>[ab])_(?P<n>-?\\d+)$\", name)\n rotate_match = re.match(r\"r$\", name)\n\n if twist_match is not None:\n parameters = twist_match.groupdict()\n curve_name = parameters[\"curve\"]\n N = int(parameters[\"n\"])\n if curve_name == \"a\":\n if N == 1:\n cut_sequence = [(0, EQ), (0, POS), (1, EQ)]\n else:\n cut_sequence = [(0, EQ), (N, EQ), (3 * N - 3, POS)]\n while N > 1:\n low_N = N // 2\n cut_sequence.append((3 * low_N - (1 if N % 2 else 2), POS))\n if N % 2:\n cut_sequence.append((3 * low_N - 3, POS))\n N = low_N\n elif 
curve_name == \"b\":\n if N <= 3:\n cut_sequence = [(0, EQ), (0, POS), (1, EQ)]\n else:\n extend_left = N % 2\n N = N // 2\n cut_sequence = [(N, EQ), (3 * N - 3, POS)]\n while N > 1:\n N_low = N // 2\n cut_sequence.append((3 * N_low - (1 if N % 2 else 2), POS))\n if extend_left:\n cut_sequence.append((3 * N_low - 3, POS))\n if N % 2 != extend_left:\n cut_sequence.append((N_low, EQ))\n break\n N = N_low\n else:\n cut_sequence.append((0, EQ))\n\n curve = T(dict(((x, y * s), 1) for x, y in cut_sequence for s in [+1, -1]))\n return curve.twist()\n elif rotate_match is not None:\n\n def isom(edge: Edge) -> Edge:\n n, k = edge\n if k == EQ:\n if n == 0:\n return (1, EQ)\n elif n == 1:\n return (0, EQ)\n return (n ^ (1 << n.bit_length() - 2), k)\n\n if n == 0:\n return (0, k)\n elif n == 1:\n return (2, k)\n elif n == 2:\n return (1, k)\n N, r = n // 3 + 1, n % 3\n return (3 * (N ^ (1 << N.bit_length() - 2)) - 3 + r, k)\n\n return T.encode([(-1, isom, isom)])\n\n raise ValueError(f\"Unknown mapping class {name}\")\n\n return bigger.MCG(T, generator)", "def get_bipartition(g):\n # Write your code here.\n colorArr = [-1] * (len(g.nodes()) + 1)\n for node in g.nodes():\n start = g.neighbors(node)\n if len(start)>0:\n src = start.pop()\n break\n colorArr[src] = 1\n queue = []\n queue.append(src)\n while (queue):\n u = queue.pop()\n for v in g.nodes():\n if g.has_edge(u, v) and colorArr[v] == -1:\n colorArr[v] = 1 - colorArr[u]\n queue.append(v)\n elif g.has_edge(u, v) and colorArr[u] == colorArr[v]:\n return None\n\n red = set()\n for i in range(1, len(colorArr)):\n if colorArr[i] == 1:\n red.add(i)\n return list(red)\n\n\n\n # Hint! If you'd like to test out these commands without\n # writing a full-fledged program, you might want to familiarise\n # yourself with the Python interactive shell or IPython (available\n # on at least some Aalto IT computers)\n\n # Create a simple line graph g: \"(1)->(2)->(3)\"\n # (The creation parameter is a dict of {node: list_of_neighbors},\n # but this is not something you will be needing in your code.)\n # >>> from networkx import Graph \n # >>> g = Graph({1: [2], 2: [3]})\n # >>> g.number_of_nodes()\n # 3\n\n # Example. Iterate over the nodes and mark them as visited\n # >>> visited = set()\n # >>> for node in g.nodes_iter(): # There is also g.nodes(), which returns a list\n # ... # do some work here\n # ... visited.add(node)\n \n # Example. Given a Node v, get all nodes s.t. there is an edge between\n # v and that node\n # >>> g.neighbors(1)\n # [2]\n\n # Example. 
Get the edges of the graph:\n # >>> e.edges() # as with nodes, there is also g.edges_iter()\n # [(1, 2), (2, 3)]\n\n # For more information, consult the NetworkX documentation:\n # https://networkx.github.io/documentation/networkx-1.10/tutorial/tutorial.html", "def random_partition_graph(groups, p_in, p_out, seed=None):\r\n\r\n if p_in > 1 or p_in < 0:\r\n raise errorhandler.ErrorHandler(\"p_in must be in [0,1]\")\r\n\r\n if p_out > 1 or p_out < 0:\r\n raise errorhandler.ErrorHandler(\"p_out must be in [0,1]\")\r\n\r\n size = sum(groups)\r\n g = graph.Graph(size, is_partition=True)\r\n\r\n next_group = {}\r\n start = 0\r\n group_index = 0\r\n for n in groups: # connect nodes inside a group\r\n edges = ((u + start, v + start) for u, v in fast_random_graph(n, p_in).edges)\r\n g.add_edges(edges)\r\n g.partition.append(set(range(start, start+n)))\r\n next_group.update(dict.fromkeys(range(start, start + n), start + n))\r\n group_index += 1\r\n start += n\r\n\r\n # connect nodes between groups\r\n if p_out == 0:\r\n return g\r\n if p_out == 1:\r\n for n in next_group:\r\n targets = range(next_group[n], len(g))\r\n g.add_edges(zip([n] * len(targets), targets))\r\n return g\r\n\r\n # using method similar to fast_random_graph\r\n lp = math.log(1.0 - p_out)\r\n n = len(g)\r\n\r\n for u in range(n - 1):\r\n v = next_group[u]\r\n while v < n:\r\n lr = math.log(1.0 - random.random())\r\n v += int(lr / lp)\r\n if v < n:\r\n g.add_edge(u, v)\r\n v += 1\r\n\r\n return g", "def test_get_edges_2(self):\n G = [[0, [0, 1, 2, 3]], [1, [0, 2]], [2, [0, 1, 3]], [3, [0, 2, 3]]]\n edges = kargermincut.get_edges(G)\n edges = kargermincut.remove_self_loops(edges)\n self.assertEqual(edges, [[0, 1], [0, 2], [0, 3], [1, 2], [2, 3]])", "def test_reiterative_leiden(self):\n edges = _get_edges()\n single_modularity, single_partitions = gpn.leiden(edges, seed=seed)\n\n repetitive_modularity, repetitive_partitions = gpn.leiden(edges, seed=seed, trials=10)\n self.assertTrue(single_modularity < repetitive_modularity)", "def distribute_uniform(totalsize, groups):\n ret = []\n for i in range(groups):\n myn = totalsize // groups\n off = 0\n leftover = totalsize % groups\n if ( i < leftover ):\n myn = myn + 1\n off = i * myn\n else:\n off = ((myn + 1) * leftover) + (myn * (i - leftover))\n ret.append( (off, myn) )\n return ret", "def ggn_factor_inner_shape(self) -> Sequence[int]:\n pass", "def kd_domain_split(counts_all, ndomains, log=null_log):\n\n split_fac = 1.35 * (float(ndomains)/np.cumprod(counts_all.shape)[-1])**(1.0/3.0)\n print('split factor', split_fac, file=log)\n # First translate the box so 0,0,0 in best posn to minimise communication\n total_shifts = []\n for axis in range(3):\n # Sum over other axes\n sum_axes = list(np.arange(len(counts_all.shape)))\n sum_axes.pop(axis)\n sum_axes = tuple(sum_axes)\n\n count_ax = counts_all.sum(axis=sum_axes, dtype=np.int64)\n # amount communicated per plane\n comm = count_ax + np.roll(count_ax, 1)\n\n total_shifts.append(np.argmin(comm))\n\n\n for axis, r in enumerate(total_shifts):\n counts_all = np.roll(counts_all, shift=-r, axis=axis)\n\n print('Best shifts', total_shifts, file=log)\n\n\n # pad\n counts_pad = np.empty(tuple(v+2 for v in counts_all.shape), dtype=counts_all.dtype)\n counts_pad[1:-1,1:-1,1:-1] = counts_all\n counts_pad[1:-1,1:-1,0] = counts_pad[1:-1,1:-1, -2]\n counts_pad[1:-1,1:-1,-1] = counts_pad[1:-1,1:-1,1]\n counts_pad[1:-1,0] = counts_pad[1:-1, -2]\n counts_pad[1:-1,-1] = counts_pad[1:-1, 1]\n counts_pad[0] = counts_pad[-2]\n counts_pad[-1] = 
counts_pad[1]\n\n\n domain_segments = []\n\n doms_tosplit = [((0,0,0), counts_pad, ndomains)]\n\n while len(doms_tosplit):\n dom_topleft, counts, ndom = doms_tosplit.pop(0)\n\n if ndom==1:\n # done\n dom_shape = tuple(v-2 for v in counts.shape)\n domain_segments.append((dom_topleft, dom_shape, counts.sum(dtype=np.uint64)))\n continue\n\n # Bisect this domain \n axis, split_idx, n_L = bisect_anyaxis(counts, ndom, split_fac)\n\n n_R = ndom-n_L\n\n if axis==0:\n counts_L, counts_R = counts[:split_idx+2], counts[split_idx:]\n elif axis==1:\n counts_L, counts_R = counts[:,:split_idx+2], counts[:,split_idx:] \n elif axis==2:\n counts_L, counts_R = counts[:,:,:split_idx+2], counts[:,:,split_idx:]\n else:\n raise Exception('3d only, aaargh.')\n\n # add left and right domains\n doms_tosplit.append((dom_topleft, counts_L, n_L))\n\n # top left of right domain\n dom_R_topleft = list(dom_topleft)\n dom_R_topleft[axis] += split_idx\n dom_R_topleft = tuple(dom_R_topleft)\n\n doms_tosplit.append((dom_R_topleft, counts_R, n_R))\n\n\n # sort domains biggest->smallest\n domain_segments = sorted(domain_segments, key=lambda ijk_shape_pts:-ijk_shape_pts[2])\n\n doms = np.empty(counts_all.shape, dtype=np.int16)\n\n for d,(ijk, shape, tot_pts) in enumerate(domain_segments):\n segment = tuple(slice(i,i+size) for i,size in zip(ijk, shape))\n doms[segment] = d+1\n real_pts = counts_all[segment].sum(dtype=np.int64)\n# print('domain', d, 'shape', shape, '{:,} pts, {:,} total'.format(real_pts, tot_pts), file=log)\n\n # Undo the total shifts\n for axis, r in enumerate(total_shifts):\n doms = np.roll(doms, shift=r, axis=axis)\n \n return doms", "def test_greedy_partition(self):\r\n\r\n #(non) partition into one bucket\r\n obs_part, obs_levels = greedy_partition({'1': 2,\r\n '2': 1,\r\n '3': 3}, 1)\r\n self.assertEquals(obs_levels, [6])\r\n self.assertEquals(obs_part, [['3', '1', '2']])\r\n\r\n # two buckets\r\n obs_part, obs_levels = greedy_partition({'1': 2,\r\n '2': 1,\r\n '3': 3}, 2)\r\n\r\n self.assertEquals(obs_levels, [3, 3])\r\n self.assertEquals(obs_part, [['3'], ['1', '2']])\r\n\r\n # larger input\r\n obs_part, obs_levels = greedy_partition({'1': 1, '2': 2, '3': 3,\r\n '4': 4, '5': 5, '6': 6}, 2)\r\n self.assertEquals(obs_levels, [11, 10])\r\n self.assertEquals(obs_part, [['6', '3', '2'], ['5', '4', '1']])", "def greedy_split(arr, n, axis=0):\n length = arr.shape[axis]\n # compute the size of each of the first n-1 blocks\n block_size = int(np.ceil(length / float(n)))\n # the indices at which the splits will occur\n ix = np.arange(block_size, length, block_size)\n return np.array(np.split(arr, ix, axis))", "def _get_sharded_ranges(\n begin,\n end,\n max_length,\n):\n if max_length <= 0:\n raise ValueError(\"max_length <= 0.\")\n length = end - begin\n if length <= max_length:\n return [(begin, end)]\n pivot = begin + length // 2\n return (_get_sharded_ranges(begin, pivot, max_length) +\n _get_sharded_ranges(pivot, end, max_length))", "def Generate_edges(size, connectedness):\r\n\r\n assert connectedness <= 1\r\n random.seed(10)\r\n for i in range(size):\r\n for j in range(i + 1, size):\r\n if random.randrange(0, 100) <= connectedness * 100:\r\n yield f'{i} {j}'", "def edges(self):\n\t\tleftback = self.center + self.left*self.wr - self.forward*self.hr\n\t\tleftfront = self.center + self.left*self.wr + self.forward*self.hr\n\t\trightfront = self.center - self.left*self.wr + self.forward*self.hr\n\t\trightback = self.center - self.left*self.wr - self.forward*self.hr\n\t\tyield (leftback, leftfront)\n\t\tyield 
(leftfront, rightfront)\n\t\tyield (rightfront, rightback)\n\t\tyield (rightback, leftback)", "def regions_from_graph(vertices, edges):\n # step 0 remove filaments (not included in original algorithm)\n nv = np.zeros(len(vertices))\n v = vertices.keys()\n v.sort()\n v2e = {}\n for edge in edges:\n s,e = edge\n nv[v.index(s)] += 1\n nv[v.index(e)] += 1\n v2e[s] = edge\n v2e[e] = edge\n\n filament_nodes = np.nonzero(nv==1)[0]\n filaments = []\n for f in filament_nodes:\n filaments.append(v2e[f])\n edges.remove(v2e[f])\n\n #print filaments\n\n # step 1\n # have a twin for each directed edge\n dedges = edges[:]\n for edge in edges:\n new_edge = edge[1], edge[0]\n if new_edge not in dedges:\n dedges.append( (edge[1],edge[0]) )\n\n # step 2 complement each directed edge with an angle formed with horizontal\n # line passing through edge[0] for each edge\n angles = []\n from math import atan2, degrees\n\n for edge in dedges:\n\n v1 = vertices[edge[0]]\n v2 = vertices[edge[1]]\n dx = v2[0] - v1[0]\n dy = v2[1] - v1[1]\n at = atan2(dy, dx)\n d = degrees(at)\n if d < 0:\n d = 360 + d\n angles.append( [ (edge[0],d), (edge[0],edge[1]) ])\n\n # step 3 sort the list into ascending order using vi and angle as primary and\n # secondary keys\n angles.sort()\n\n\n # form wedges on consecutive entries with same vi (vi,vj,dij), (vi,vk,dik)\n # gives the wedge (vk,vi,vj)\n wedges = []\n start = angles[0]\n c = 0\n for i in range(1,len(angles)):\n next_edge = angles[i]\n previous_edge = angles[i-1]\n if next_edge[0][0] == start[0][0]:\n wedge = [ next_edge[1][1], previous_edge[1][0], previous_edge[1][1] ]\n wedges.append(wedge)\n else:\n # first form wedge with last and first entry of current group\n # to do\n wedge = [ start[1][1], previous_edge[1][0], previous_edge[1][1] ]\n wedges.append(wedge)\n start = next_edge\n\n # final pair\n\n wedge = [ start[1][1], previous_edge[1][0], next_edge[1][1] ]\n wedges.append(wedge)\n\n\n # phase two\n # form regions from contiguous wedges\n\n nw = len(wedges)\n used = [0]*nw\n wedges.sort()\n #print wedges\n\n #print 'forming regions'\n\n i = 0\n regions = []\n while sum(used) < nw:\n i = used.index(0)\n wi = wedges[i]\n start = wedges[i]\n used[i] = 1\n region = [start]\n # find next contiguous wedge for wi\n forming = True\n while forming:\n\n\n # find first wedge contiguous to wi\n for j in xrange(nw):\n wj = wedges[j]\n if wj[0] == wi[1] and wj[1] == wi[2]:\n region.append(wj)\n used[j] = 1\n wi = wj\n if wi[1] == start[0] and wi[2] == start[1]:\n forming = False\n regions.append(region)\n #print start, regions\n #raw_input('h')\n break\n\n # put in closed cartographic form\n nodes = []\n for region in regions:\n wedge0 = [ wedge[0] for wedge in region]\n wedge0.append(wedge0[0])\n nodes.append(wedge0)\n\n results = {}\n results['regions'] = nodes\n results['filaments'] = filaments\n\n return results", "def find_edges(self):\n self.edges = [deepcopy(self.grid[0]), [], deepcopy(self.grid[-1]), []]\n for g in self.grid:\n self.edges[3].append(g[0])\n self.edges[1].append(g[-1])\n self.edges[2]\n self.edges[3]", "def find_boundary(edges):\n\n inputs = set([x[0] for x in edges])\n outputs = set([x[1] for x in edges])\n for e in edges:\n inputs.discard(e[1])\n outputs.discard(e[0])\n return inputs, outputs", "def golden_split(n):\n large = n / GOLDEN_RATIO\n small = n - large\n large = int(round(large))\n small = int(round(small))\n return large, small", "def solve_for_edge_dimensionality(n):\n return int(round(np.sqrt(2 * n + 2.25) - 1.5))", "def 
generate_possible_slices(L, H):\n n_min = 2 * L\n n_max = H\n\n slices = []\n for he in range(1, n_max+1):\n for wi in range(max(1, n_min // he), n_max + 1):\n if he * wi > n_max:\n break\n slices.append((wi, he))\n\n return slices", "def red_boundaries_as_spaces(self):\n marks = set(self.marks)\n if self.n != 0:\n first_mark_list = [marks.pop()]\n \n \n p1,p2 = self.next_marks() \n \n for g1 in range(0, self.genus + 1):\n for p in subsets(marks):\n r_marks = set(first_mark_list + p)\n if 3*g1 - 3 + len(r_marks) + 1 >= 0 and 3*(self.genus-g1) - 3 + self.n - len(r_marks) + 1 >= 0:\n yield (Mgn(g1, r_marks.union([p1])), p1), (Mgn(self.genus - g1, marks.difference(r_marks).union([p2])), p2) \n \n else: # self.n == 0\n for g1 in range(1, floor(self.genus/2.0)+1):\n yield (Mgn(g1, [1]), 1) , (Mgn(self.genus-g1, [2]), 2)", "def partition_graph_with_halo(g, node_part, extra_cached_hops, reshuffle=False):\n assert len(node_part) == g.num_nodes()\n if reshuffle:\n g, node_part = reshuffle_graph(g, node_part)\n orig_nids = g.ndata[\"orig_id\"]\n orig_eids = g.edata[\"orig_id\"]\n\n node_part = utils.toindex(node_part)\n start = time.time()\n subgs = _CAPI_DGLPartitionWithHalo_Hetero(\n g._graph, node_part.todgltensor(), extra_cached_hops\n )\n # g is no longer needed. Free memory.\n g = None\n print(\"Split the graph: {:.3f} seconds\".format(time.time() - start))\n subg_dict = {}\n node_part = node_part.tousertensor()\n start = time.time()\n\n # This function determines whether an edge belongs to a partition.\n # An edge is assigned to a partition based on its destination node. If its destination node\n # is assigned to a partition, we assign the edge to the partition as well.\n def get_inner_edge(subg, inner_node):\n inner_edge = F.zeros((subg.num_edges(),), F.int8, F.cpu())\n inner_nids = F.nonzero_1d(inner_node)\n # TODO(zhengda) we need to fix utils.toindex() to avoid the dtype cast below.\n inner_nids = F.astype(inner_nids, F.int64)\n inner_eids = subg.in_edges(inner_nids, form=\"eid\")\n inner_edge = F.scatter_row(\n inner_edge,\n inner_eids,\n F.ones((len(inner_eids),), F.dtype(inner_edge), F.cpu()),\n )\n return inner_edge\n\n # This creaets a subgraph from subgraphs returned from the CAPI above.\n def create_subgraph(subg, induced_nodes, induced_edges, inner_node):\n subg1 = DGLGraph(gidx=subg.graph, ntypes=[\"_N\"], etypes=[\"_E\"])\n # If IDs are shuffled, we should shuffled edges. This will help us collect edge data\n # from the distributed graph after training.\n if reshuffle:\n # When we shuffle edges, we need to make sure that the inner edges are assigned with\n # contiguous edge IDs and their ID range starts with 0. In other words, we want to\n # place these edge IDs in the front of the edge list. 
To ensure that, we add the IDs\n # of outer edges with a large value, so we will get the sorted list as we want.\n max_eid = F.max(induced_edges[0], 0) + 1\n inner_edge = get_inner_edge(subg1, inner_node)\n eid = F.astype(induced_edges[0], F.int64) + max_eid * F.astype(\n inner_edge == 0, F.int64\n )\n\n _, index = F.sort_1d(eid)\n subg1 = edge_subgraph(subg1, index, relabel_nodes=False)\n subg1.ndata[NID] = induced_nodes[0]\n subg1.edata[EID] = F.gather_row(induced_edges[0], index)\n else:\n subg1.ndata[NID] = induced_nodes[0]\n subg1.edata[EID] = induced_edges[0]\n return subg1\n\n for i, subg in enumerate(subgs):\n inner_node = _get_halo_heterosubgraph_inner_node(subg)\n inner_node = F.zerocopy_from_dlpack(inner_node.to_dlpack())\n subg = create_subgraph(\n subg, subg.induced_nodes, subg.induced_edges, inner_node\n )\n subg.ndata[\"inner_node\"] = inner_node\n subg.ndata[\"part_id\"] = F.gather_row(node_part, subg.ndata[NID])\n if reshuffle:\n subg.ndata[\"orig_id\"] = F.gather_row(orig_nids, subg.ndata[NID])\n subg.edata[\"orig_id\"] = F.gather_row(orig_eids, subg.edata[EID])\n\n if extra_cached_hops >= 1:\n inner_edge = get_inner_edge(subg, inner_node)\n else:\n inner_edge = F.ones((subg.num_edges(),), F.int8, F.cpu())\n subg.edata[\"inner_edge\"] = inner_edge\n subg_dict[i] = subg\n print(\"Construct subgraphs: {:.3f} seconds\".format(time.time() - start))\n if reshuffle:\n return subg_dict, orig_nids, orig_eids\n else:\n return subg_dict, None, None", "def greedy_partition(counts, n):\r\n\r\n buckets = [[] for i in range(n)]\r\n fill_levels = [0 for i in range(n)]\r\n\r\n for key in sorted(counts, reverse=True,\r\n key=lambda c: counts[c]):\r\n smallest = fill_levels.index(min(fill_levels))\r\n buckets[smallest].append(key)\r\n fill_levels[smallest] += counts[key]\r\n\r\n return buckets, fill_levels", "def pre_processing(self):\n while self.number_of_dmax() < 1:\n self.dmax -= 1\n __edges = self.current_edges()\n print('current edges =', __edges, ' expected edges =', self.edges)\n if __edges < self.edges:\n __temp = self.dmax\n __l = self.dmax\n self.dmax *= 2\n __r = self.dmax\n while self.number_of_dmax() >= 1 and __r < self.nodes:\n __l = __r\n self.dmax *= 2\n __r = self.dmax\n while __l < __r:\n self.dmax = int((__l + __r) / 2)\n if self.number_of_dmax() < 1:\n __r = self.dmax\n else:\n __l = self.dmax + 1\n self.dmax = __l - 1\n __edges = self.current_edges()\n if __edges > self.edges:\n __l = __temp\n __r = self.dmax\n while __l < __r:\n self.dmax = int((__l + __r) / 2)\n __edges = self.current_edges()\n if __edges > self.edges:\n __r = self.dmax\n else:\n __l = self.dmax + 1\n self.dmax = __l - 1\n print('adjust dmax =', self.dmax, ' edges =', int(__edges))\n elif __edges > self.edges:\n __temp1 = [_ ** self.lmd for _ in range(self.dmin, self.dmax + 1)]\n __temp2 = [_ * __ for _, __ in zip(__temp1, list(range(self.dmin, self.dmax+1)))]\n c = self.edges / sum(__temp2)\n n = c * sum(__temp1)\n self.select_p = n / self.nodes\n print('reduce select p =', self.select_p)", "def permuteEdges(self):\n\t\tpermuted_graph = copy.copy(self)\n\t\t# swap about half the edges\n\t\ti = len(self.graph)/2\n\t\twhile i > 0:\n\t\t\t# swap edge targets\n\t\t\tsourceA, targetA = random.choice(permuted_graph.graph.keys())\n\t\t\tiTypeA, emA = permuted_graph.graph[(sourceA, targetA)]\n\t\t\tsourceB, targetB = random.choice(permuted_graph.graph.keys())\n\t\t\tiTypeB, emB = permuted_graph.graph[(sourceB, targetB)]\n\n\t\t\t# can't be the same random choice, obviously...\n\t\t\tif sourceA == sourceB 
or targetA == targetB:\n\t\t\t\tcontinue\n\n\t\t\t# add edges\n\t\t\tpermuted_graph.graph[(sourceA, targetB)] = (iTypeA, emA)\n\t\t\tpermuted_graph.graph[(sourceB, targetA)] = (iTypeB, emB)\n\n\t\t\tdel permuted_graph.graph[(sourceA, targetA)]\n\t\t\tdel permuted_graph.graph[(sourceB, targetB)]\n\n\t\t\ti -= 1\n\n\t\t# return a new graph object\t\t\n\t\treturn permuted_graph", "def get_edges(chromosome):\n edges = []\n for i in range(len(chromosome)):\n for j in range(len(chromosome[i]) - 1):\n m = 2 * chromosome[i][j]\n if chromosome[i][j] < 0:\n m = -m - 1\n n = 2 * chromosome[i][j + 1] - 1\n if chromosome[i][j + 1] < 0:\n n = -n -1\n edges.append((m, n))\n m = 2 * chromosome[i][j + 1]\n if chromosome[i][j + 1] < 0:\n m = -m - 1\n n = 2 * chromosome[i][0] - 1\n if chromosome[i][0] < 0:\n n = -n -1\n edges.append((m, n))\n return edges", "def _get_subgraph(entities, kb_r, multigraph_W):\n seed = np.zeros((multigraph_W.shape[0], 1))\n if not SEED_WEIGHTING:\n seed[entities] = 1. / len(set(entities))\n else:\n seed[entities] = np.expand_dims(np.arange(len(entities), 0, -1),\n axis=1)\n seed = seed / seed.sum()\n ppr = _personalized_pagerank(seed, multigraph_W)\n sorted_idx = np.argsort(ppr)[::-1]\n extracted_ents = sorted_idx[:MAX_ENT]\n extracted_scores = ppr[sorted_idx[:MAX_ENT]]\n # check if any ppr values are nearly zero\n zero_idx = np.where(ppr[extracted_ents] < 1e-6)[0]\n if zero_idx.shape[0] > 0:\n extracted_ents = extracted_ents[:zero_idx[0]]\n extracted_tuples = []\n ents_in_tups = set()\n for relation in kb_r:\n submat = kb_r[relation][extracted_ents, :]\n submat = submat[:, extracted_ents]\n row_idx, col_idx = submat.nonzero()\n for ii in range(row_idx.shape[0]):\n extracted_tuples.append(\n (extracted_ents[row_idx[ii]], relation,\n extracted_ents[col_idx[ii]]))\n ents_in_tups.add((extracted_ents[row_idx[ii]],\n extracted_scores[row_idx[ii]]))\n ents_in_tups.add((extracted_ents[col_idx[ii]],\n extracted_scores[col_idx[ii]]))\n return extracted_tuples, list(ents_in_tups)", "def find_loops(edges):\n check_regularity(edges)\n loops = []\n edges = edges[:]\n start_i = -1\n last_i = -1\n n = []\n while edges != []:\n if start_i == -1:\n e = edges[0]\n n = [e]\n del edges[0]\n start_i = n[-1][0]\n last_i = n[-1][1]\n else:\n ok = False\n for i, e in enumerate(edges):\n if e[0] == last_i:\n n.append(e)\n del edges[i]\n ok = True\n break\n elif e[1] == last_i:\n n.append((e[1], e[0]))\n del edges[i]\n ok = True\n break\n if not ok:\n if start_i == last_i:\n start_i = -1\n loops.append(n)\n else:\n raise Exception(\"Missing some boundary edge\")\n last_i = n[-1][1]\n if start_i == last_i:\n loops.append(n)\n else:\n raise Exception(\"Missing some boundary edge\")\n return loops", "def GetEdgesPent(self):\n\n p = self.InferPolynomialDegree()\n\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.all_edges,np.ndarray):\n if self.all_edges.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.all_edges.shape[1]==2 and p > 1:\n pass\n else:\n return self.all_edges\n\n node_arranger = np.array([\n [0,1],\n [1,2],\n [2,3],\n [3,4],\n [4,0],\n ])\n\n # GET ALL EDGES FROM THE ELEMENT CONNECTIVITY\n edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],\n self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]],\n self.elements[:,node_arranger[4,:]]),axis=0).astype(np.uint64)\n\n # REMOVE DUPLICATES\n edges, idx = unique2d(edges,consider_sort=True,order=False,return_index=True)\n\n edge_to_element = 
np.zeros((edges.shape[0],2),np.int64)\n edge_to_element[:,0] = idx % self.elements.shape[0]\n edge_to_element[:,1] = idx // self.elements.shape[0]\n\n self.edge_to_element = edge_to_element\n self.all_edges = edges\n\n return edges", "def split(gt, isotropic):\n edges = get_edges(gt, isotropic)\n cytosol = edges == 0\n membrane = edges != 0\n return cytosol, membrane", "def bipartite_vertex_cover(bigraph, algo=\"Hopcroft-Karp\"):\n if algo == \"Hopcroft-Karp\":\n coord = [(irow,icol) for irow,cols in enumerate(bigraph) for icol in cols]\n coord = np.array(coord)\n graph = csr_matrix((np.ones(coord.shape[0]),(coord[:,0],coord[:,1])))\n matchV = maximum_bipartite_matching(graph, perm_type='row')\n matchV = [None if x==-1 else x for x in matchV]\n nU, nV = graph.shape\n assert len(matchV) == nV\n elif algo == \"Hungarian\":\n matchV = max_bipartite_matching2(bigraph)\n nU, nV = len(bigraph), len(matchV)\n else:\n assert False\n\n matchU = [None] * nU\n \n for v in range(nV): # -- build the mapping from U to V\n if matchV[v] is not None:\n matchU[matchV[v]] = v\n \n def old_konig():\n visitU = [False] * nU # -- build max alternating forest\n visitV = [False] * nV\n for u in range(nU):\n if matchU[u] is None: # -- starting with free vertices in U\n _alternate(u, bigraph, visitU, visitV, matchV)\n inverse = [not b for b in visitU]\n return (inverse, visitV)\n \n def new_konig():\n # solve the limitation of huge number of recursive calls\n visitU = [False] * nU # -- build max alternating forest\n visitV = [False] * nV\n wait_u = set(range(nU)) - set(matchV) \n while len(wait_u) > 0:\n u = wait_u.pop()\n visitU[u] = True\n for v in bigraph[u]:\n if not visitV[v]:\n visitV[v] = True\n assert matchV[v] is not None # otherwise match is not maximum\n assert matchV[v] not in wait_u\n wait_u.add(matchV[v])\n inverse = [not b for b in visitU]\n return (inverse, visitV)\n \n #res_old = old_konig()\n res_new = new_konig()\n #assert res_old == res_new\n return res_new", "def node_assignment_bipartite(edge_index: nb.int64[:,:],\n edge_label: nb.int64[:],\n primaries: nb.int64[:],\n n: nb.int64) -> nb.int64[:]:\n group_ids = np.arange(n, dtype=np.int64)\n others = [i for i in range(n) if i not in primaries]\n for i in others:\n inds = edge_index[:,1] == i\n if np.sum(inds) == 0:\n continue\n indmax = np.argmax(edge_label[inds])\n group_ids[i] = edge_index[inds,0][indmax]\n\n return group_ids", "def graphs_conn_iso(n):\n def graphs_conn_helper(n):\n for oldg in graphs_conn_iso(n-1):\n for s in powerset(range(n-1)):\n if s == ():\n continue\n g = oldg + [list(s)]\n for v in s:\n g[v] = g[v] + [n-1]\n # NOT g[v] += ... or g[v].append(...)\n # to avoid changing items in oldg\n yield g\n\n assert n >= 0\n if n >= 3:\n for g in unique_iso(graphs_conn_helper(n)):\n yield g\n elif n == 2:\n yield [ [1], [0] ]\n elif n == 1:\n yield [ [] ]\n else: # n == 0\n yield []", "def greedy_variable_order(primal_graph:PrimalGraph, pvo:List[List[int]]=None, pool_size=8, cutoff=INF):\n def fill_count(nid):\n \"\"\"\n count number of fill-in edges after removing nid\n number of combinations of nhd - existing edges (nodes in the subgraph of nhd)\n \"\"\"\n n_edges = G.subgraph(G.neighbors(nid)).number_of_edges()\n deg = G.degree[nid]\n n_fill = deg*(deg-1)//2 - n_edges\n return n_fill\n\n def remove_fill_in_edges(nid):\n G.add_edges_from(itertools.combinations(G.neighbors(nid), 2)) # adding edge twice? 
no effect\n G.remove_node(nid)\n\n G = primal_graph.copy() # G = copy.deepcopy(primal_graph)\n if pvo is None:\n pvo = [list(G.nodes())] #[ [all in one block] ]\n ordering = []\n induced_width = 0\n for each_block in pvo:\n processing_nodes = SortedList( [(fill_count(nid), nid) for nid in each_block] ) # ascending order\n while processing_nodes:\n fill, selected_nid = processing_nodes[0]\n if fill != 0: # don't add any edge\n # pick a node in random from a pool of best nodes; each node has prob 1/(fill_in edges)\n scores, candidates = zip(*processing_nodes[:pool_size])\n probs = np.power(np.array(scores), -1.0)\n selected_ind = np.random.choice(len(probs), p=probs/(np.sum(probs)))\n selected_nid = candidates[selected_ind]\n ordering.append(selected_nid)\n # current_width = len(G.neighbors(selected_nid))\n current_width = G.degree[selected_nid]\n if current_width > cutoff:\n return None, induced_width\n if current_width > induced_width:\n induced_width = current_width\n remove_fill_in_edges(selected_nid)\n # recompute score after removing the selected node from primal graph\n processing_nodes = SortedList( [(fill_count(nid), nid) for _, nid in processing_nodes if nid != selected_nid] )\n return ordering, induced_width", "def PRGA(tab):\n i = 0\n j = 0\n while True:\n i = (i + 1) % MOD\n j = (j + tab[i]) % MOD\n\n tab[i], tab[j] = tab[j], tab[i]\n K = tab[(tab[i] + tab[j]) % MOD]\n yield K", "def num_edges(g):\n total_edges_with_duplicates = sum(len(v) for v in g.values())\n return total_edges_with_duplicates // 2", "def generate_edges(n, offset):\n max_edges = np.int64((n * (n-1)) / 2)\n edges = np.zeros((max_edges,2), dtype=np.int64)\n k = 0\n for v1 in range(0+offset,n+offset):\n for v2 in range(0+offset,v1):\n edges[k][0] = v1\n edges[k][1] = v2\n k = k + 1\n return edges", "def partitions(n):\n for a in range(2,n//2+1):\n yield a, n-a", "def offspring_fertility(n1=4,n2=4):\n ary = np.zeros( (n1,n2, n1,n2, 3), float )\n for i in range(n1):\n for j in range(n2):\n for k in range(n1):\n for l in range(n2):\n # set group counter to zero (one counter is sufficient)\n gc1 = 0\n for index in [i,j,k,l]: \n if index in [0,1]: gc1+=1\n if gc1==0 or gc1==4:\n ary[i,j,k,l,0] = 1. # set mark at S0\n elif gc1==1 or gc1==3:\n ary[i,j,k,l,2] = 1. # set mark at S2\n else:\n ary[i,j,k,l,1] = 1. # set mark at S1\n return ary", "def partition_data(d,r,n):\n\tdem_part = [] #democrat partition\n\trep_part = [] #republican partition\n\tdem_perm = range(d) \n\trandom.shuffle(dem_perm) #democrat permutation\n\trep_perm = range(r) \n\trandom.shuffle(rep_perm) #republican permutation\n\tdem_size = d/n #size per partition\n\trep_size = r/n #size per partition\n\n\ti = 0\n\tj = 0\n\tcount = 0\n\tfor x in range(0,n):\n\t\tif count < d%n:\n\t\t\tdem_part.append(dem_perm[i:i+dem_size+1])\n\t\t\ti = i+dem_size+1\n\t\telif x == n-1:\n\t\t\tdem_part.append(dem_perm[i:d])\n\t\telse:\n\t\t\tdem_part.append(dem_perm[i:i+dem_size])\n\t\t\ti = i+dem_size\n\t\tif count < r%n:\n\t\t\trep_part.append(rep_perm[i:i+rep_size+1])\n\t\t\tj = j+rep_size+1\n\t\telif x == n-1:\n\t\t\trep_part.append(rep_perm[j:r])\n\t\telse:\n\t\t\trep_part.append(rep_perm[j:j+rep_size])\n\t\t\tj = j+rep_size\n\t\tcount = count + 1\n\n\treturn dem_part,rep_part", "def erdos_rennie_like(num_nodes, num_hedges, max_hedge_width, self_loops=False):\n if max_hedge_width > num_nodes:\n print(\"Error: the requested construction is flawed! 
There are not enoough nodes to support the requested max_hedge_width!\")\n return -1\n \n if ((max_hedge_width * 2) > num_nodes) and (self_loops == False):\n print(\"Error: the requested construction is flawed! There are not enough nodes to support the requested max_hedge_width without self loops!\")\n return -1\n \n node_ls = [] \n hedge_ls = [] #hedges will be encoded as ordered lists, where entry 0 is a list containing nodes in the tail and entry 1 is a list containing nodes in the head\n hedge_set = set() #to avoid repeated edges I use a set to keep track of the edges. The set will be converted back to a list when it is returned.\n \n for n in range(num_nodes): #initializes nodes\n node_ls.append(str(n))\n \n hedges_full = False\n while hedges_full == False:\n tail_width = random.randint(1,max_hedge_width)\n head_width = random.randint(1,max_hedge_width)\n new_hedge = hedge_maker(tail_width, head_width, node_ls, self_loops)\n hedge_set.add(new_hedge)\n if len(hedge_set) >= num_hedges:\n hedges_full = True\n \n for hedge in hedge_set: #formats the hedges into lists instead of tuples containing frozensets\n tail_ls = []\n head_ls = []\n for node in hedge[0]:\n tail_ls.append(node)\n \n for node in hedge[1]:\n head_ls.append(node)\n \n formatted_hedge = [tail_ls, head_ls]\n hedge_ls.append(formatted_hedge)\n \n return node_ls, hedge_ls", "def _makeEdges(self):\n self.edges = set()\n\n for i in range(self.size):\n self.edges.add(makePair(self.tour[i - 1], self.tour[i]))", "def GetBoundaryEdgesTri(self):\n\n p = self.InferPolynomialDegree()\n\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.edges,np.ndarray):\n if self.edges.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.edges.shape[1] == 2 and p > 1:\n pass\n else:\n return\n\n\n node_arranger = NodeArrangementTri(p-1)[0]\n\n # CONCATENATE ALL THE EDGES MADE FROM ELEMENTS\n all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],\n self.elements[:,node_arranger[2,:]]),axis=0)\n # GET UNIQUE ROWS\n uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)\n\n # ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES\n freqs_inv = itemfreq(inv)\n edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]\n # NOT ARRANGED\n self.edges = uniques[edges_ext_flags,:]\n\n # DETERMINE WHICH FACE OF THE ELEMENT THEY ARE\n boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)\n\n # FURTHER RE-ARRANGEMENT / ARANGE THE NODES BASED ON THE ORDER THEY APPEAR\n # IN ELEMENT CONNECTIVITY\n # THIS STEP IS NOT NECESSARY INDEED - ITS JUST FOR RE-ARANGMENT OF EDGES\n all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)\n all_edges_in_edges = np.where(all_edges_in_edges==True)[0]\n\n boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]\n boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]\n\n # ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS\n self.edges = self.elements[boundary_edge_to_element[:,0][:,None],node_arranger[boundary_edge_to_element[:,1],:]]\n self.edges = self.edges.astype(np.uint64)\n self.boundary_edge_to_element = boundary_edge_to_element\n\n return self.edges", "def _iterative_cutting(g, p):\n\n to_be_processed = [g]\n K = math.ceil(len(g.nodes()) / p)\n\n res = []\n while len(to_be_processed) > 0:\n\n g = to_be_processed.pop()\n g_l, g_r = kernighan_lin_bisection(g, weight=\"rate\")\n\n for partition in g_l, 
g_r:\n if len(partition) > K:\n to_be_processed.append(g.subgraph(partition))\n else:\n res.append(partition)\n return res", "def _recursive_cutting(g, p, res=[]):\n k = math.ceil(len(g.nodes()) / p)\n g_l, g_r = kernighan_lin_bisection(g, weight=\"rate\")\n\n for partition in g_l, g_r:\n if len(partition) > k:\n _recursive_cutting(g.subgraph(partition), p / 2, res)\n else:\n res.append(partition)\n\n return res", "def break_into_chunks(in_lst):\n chunk_lst = []\n fst = True\n right_edge = 0\n\n for tup in in_lst:\n if fst:\n fst = False\n chunk_lst.append(tup)\n right_edge = tup[2]\n continue\n if tup[0] > right_edge:\n yield chunk_lst\n chunk_lst = []\n\n if tup[2] > right_edge:\n right_edge = tup[2]\n\n chunk_lst.append(tup)\n if chunk_lst:\n yield chunk_lst", "def generate_false_edges(true_edges, num_false_edges=5):\n nodes = list(set(chain.from_iterable(true_edges)))\n true_edges = set(true_edges)\n false_edges = set()\n \n while len(false_edges) < num_false_edges:\n # randomly sample two different nodes and check whether the pair exisit or not\n head, tail = np.random.choice(nodes, 2)\n if head != tail and ((head, tail) not in true_edges and (tail, head) not in true_edges) and ((head, tail) not in false_edges and (tail, head) not in false_edges):\n false_edges.add((head, tail)) \n false_edges = sorted(false_edges)\n \n return false_edges", "def load_partition_1d(counts, n_dom, split_fac):\n count_sums = np.cumsum(counts)\n # If we split at n+1, how many points on left and right?\n ptsL = count_sums[1:].astype(np.float64)\n ptsR = np.empty_like(ptsL)\n ptsR[:] = count_sums[-1] \n ptsR[1:] -= count_sums[:-2]\n\n\n # Best split of domains (+/- 1)\n left0 = np.clip(((n_dom * ptsL)/(ptsL+ptsR)).astype(np.int32), 1, n_dom-1)\n right0 = n_dom-left0\n\n left1 = np.minimum(left0+1,n_dom-1)\n right1 = n_dom-left1\n\n # whats the worst (left/right) load balance?\n p_per_proc0 = np.maximum((1+split_fac*np.log(left0))*ptsL/left0, (1+split_fac*np.log(right0))*ptsR/right0)\n p_per_proc1 = np.maximum((1+split_fac*np.log(left1))*ptsL/left1, (1+split_fac*np.log(right1))*ptsR/right1)\n\n idx_min0 = np.argmin(p_per_proc0)\n idx_min1 = np.argmin(p_per_proc1)\n \n if p_per_proc0[idx_min0] < p_per_proc1[idx_min1]:\n split = idx_min0\n n_left = left0[idx_min0]\n pval = p_per_proc0[idx_min0]\n else:\n split = idx_min1\n n_left = left1[idx_min1]\n pval = p_per_proc1[idx_min1]\n\n if split==0 or split==len(counts)-2:\n raise Exception('Tried to make a domain of pure ghosts. Something bad happened?')\n\n\n return split, n_left, pval", "def edge_generator(n1_nodes, n2_nodes, p_in, p_out):\n\n e_gen = lambda n1,n2,p: [e for e in [x for x in itertools.product(n1,n2) if x[0]!=x[1]] if random.random()<p]\n\n between_es = e_gen(n1_nodes, n2_nodes, p_out)\n in_n1 = e_gen(n1_nodes, n1_nodes, p_in)\n in_n2 = e_gen(n2_nodes, n2_nodes, p_in)\n\n return between_es + in_n1 + in_n2", "def gen_iter_graph(final_nodes, num_nodes):\n\t# create complete directed graph on m nodes (num_nodes)\n\tgraph = make_complete_graph(num_nodes)\n\n\tV = []\n\tE = []\n\ttotal_indeg = 0\n\n\tfor key in graph:\n\t\tV.append(key)\n\t\tE.append([key,graph[key]])\t\n\n\t# grow the graph by adding n - m (final_nodes - num_nodes) nodes\n\t# where each new node is connected to m nodes randomly chosen \n\t# from the set of existing nodes. 
Elimintate duplicates \n\t# to avoid parallel edges.\n\tfor node_added in range(num_nodes, final_nodes):\n\t\t# for key in graph:\n\t\t# \tfor value in graph[key]:\n\t\t# \t\ttotal_indeg += value\n\t\tV_prime = set()\n\t\t# choose randomly m nodes from V and add them to V_prime \n\t\t# where the probability of choosing node j is (indeg(j) + 1)/(totindeg + |V|)\n\t\t# i.e., call DPATrial (line 6 in pseudocode)\n\t\ttrial = DPATrial.DPATrial(num_nodes)\n\t\tV_prime = trial.run_trial(num_nodes)\n\t\tfor node in V_prime:\n\t\t\tV_prime.add(node)\n\t\tV.append(node_added)\n\t\tgraph[node_added] = V_prime\n\treturn graph", "def block_prior(m):\n n = 2 * m\n d = np.zeros((m, m, n, n))\n for i in range(m):\n for j in range(m):\n ii = 2 * i\n jj = 2 * j\n d[i, j, ii:ii + 2, jj:jj + 2] = 1\n return d.reshape(m * m, n * n)", "def randomEdgeLengths():\n\n angles = randomConvexQuad() #Random angles\n quad = unitQuad(angles, r.uniform(0.05, 0.95)) #For a random quad, with varied base edge length\n\n edges = []\n n = len(quad)\n for i in range(n): #For each edge in the quad, record the edge's length\n edges.append(( (quad[(i)%n][0] - quad[(i+1)%n][0])**2 + (quad[(i)%n][1] - quad[(i+1)%n][1])**2 )**.5) \n \n maxItem = max(edges)\n maxIndex = edges.index(maxItem) \n\n edges = edges[maxIndex:] + edges[:maxIndex] #Reorder the edge lengths so that the greatest one comes first\n for i, item in enumerate(edges): #And normalize them so that the greatest edge has length 1\n edges[i] = item / maxItem\n \n return edges", "def get_3away_pairs(kmers):\n k = len(kmers[0])\n if k == 1 or k==2:\n return []\n if k == 3:\n return [pair for pair in combinations(kmers, 2) if pair[0][0] != pair[1][0] and pair[0][1] != pair[1][1] and pair[0][2] != pair[1][2]]\n k_L = k//2\n k_R = k-k_L\n kmer_L_hashes = defaultdict(list)\n kmer_R_hashes = defaultdict(list)\n pairs = []\n kmers_L = []\n kmers_R = []\n for i, kmer in enumerate(kmers):\n kmer_L = kmer[:k_L]\n kmer_R = kmer[k_L:]\n #print(kmer_L)\n #print(kmer_R)\n kmers_L.append(kmer_L)\n kmers_R.append(kmer_R)\n kmer_L_hashes[kmer_to_int(kmer_L)] += [i]\n kmer_R_hashes[kmer_to_int(kmer_R)] += [i]\n for kmer_L_hash in kmer_L_hashes.values(): #same in first half\n if len(kmer_L_hash) > 1:\n kmer_L = kmers[kmer_L_hash[0]][:k_L] #first half\n pairs += [tuple(kmer_L + kmer for kmer in pair) for pair in get_3away_pairs([kmers[i][k_L:] for i in kmer_L_hash])] #differ by 3 in second half\n for kmer_R_hash in kmer_R_hashes.values(): #same in second half\n if len(kmer_R_hash) > 1:\n kmer_R = kmers[kmer_R_hash[0]][k_L:] #second half\n #print(kmer_R)\n pairs += [tuple(kmer + kmer_R for kmer in pair) for pair in get_3away_pairs([kmers[i][:k_L] for i in kmer_R_hash])] #differ by 3 in first half\n possible_pairs = []\n possible_pairs_L = get_1away_pairs(kmers_L)\n possible_pairs_R = get_2away_pairs(kmers_R)\n #print(kmers_L)\n #print(kmers_R)\n #print(possible_pairs_L)\n #print(possible_pairs_R)\n for possible_pair_L in possible_pairs_L:\n for possible_pair_R in possible_pairs_R:\n possible_kmer1 = possible_pair_L[0]+possible_pair_R[0]\n possible_kmer2 = possible_pair_L[1]+possible_pair_R[1]\n if possible_kmer1 in kmers and possible_kmer2 in kmers:\n pairs += [(possible_kmer1, possible_kmer2)]\n possible_pairs = []\n possible_pairs_L = get_2away_pairs(kmers_L)\n possible_pairs_R = get_1away_pairs(kmers_R)\n for possible_pair_L in possible_pairs_L:\n for possible_pair_R in possible_pairs_R:\n possible_kmer1 = possible_pair_L[0]+possible_pair_R[0]\n possible_kmer2 = 
possible_pair_L[1]+possible_pair_R[1]\n if possible_kmer1 in kmers and possible_kmer2 in kmers:\n pairs += [(possible_kmer1, possible_kmer2)]\n return(pairs)", "def test_even_chunk_sample(self):\n sample_gen = utils.shuffle_in_chunks(data_length=12, chunk_size=3)\n\n all_values = set()\n num_chunks = 0\n for sample in sample_gen:\n self.assertFalse(all_values & set(sample))\n all_values = all_values | set(sample)\n num_chunks += 1\n self.assertEqual(num_chunks, 4)\n self.assertCountEqual(all_values, list(range(12)))", "def get_sub_combinations(maxop):\n combo = collections.defaultdict(list)\n for numops in range(maxop+1):\n if numops:\n combo[numops, 1].append((numops-1,))\n for op1 in range(numops):\n combo[numops, 2].append((op1, numops - op1 - 1))\n for op2 in range(numops - op1):\n combo[numops, 3].append((op1, op2, numops - op1 - op2 - 1))\n return combo", "def num_44():\n def block_array(a, rows=3, cols=4, col_first=True, nodata=-1):\n \"\"\" a variant on array_split\n requires a N*m array\n \"\"\"\n s = np.array(a.shape)\n w = np.array([rows, cols])\n m = divmod(s, w)\n new_shape = w*m[0] + w*(m[1]!=0)\n ypad, xpad = new_shape - a.shape \n b = np.pad(a, pad_width=((0, ypad),(0, xpad)), \n mode='constant', \n constant_values=((nodata, nodata),(nodata, nodata)))\n rn, cn = new_shape\n x_s = np.arange(0, cn+cols, cols)[1:] #.tolist()\n y_s = np.arange(0, rn+rows, rows)[1:] #.tolist()\n print(\"x_s {}\\ny_s {}\".format(x_s, y_s))\n #c = np.array([i for i in np.hsplit(b, x_s) if len(i) > 0])\n c = np.array([i for i in np.split(b, x_s, axis=1) if len(i) > 0])\n d = np.array([i for i in np.split(c, y_s, axis=1) if len(i) > 0])\n e = d.swapaxes(0, 1)\n ix = np.in1d(e.ravel(), nodata).reshape(e.shape)\n f = np.ma.array(e, mask=ix, fill_value=-1)\n return b, c, d, e, f\n y, x = 9, 11\n a = np.arange(x*y).reshape(y,x)\n b, c, d, e, f = block_array(a)\n print(\"\\n{}\".format(num_44.__doc__)) \n for i in [a, b, c, d, e, f]:\n _f(i)\n return a, b, c, d, e, f", "def equalize_node_density(self, maximum_distance, maximum_angle_delta, greedy=True):\n print('network: splitting long edges...')\n \"\"\" Split edges which are very long. \"\"\"\n self.split_edges(maximum_distance)\n\n print('network: merging short edges...')\n vertices_to_remove = []\n edges_to_add = []\n \"\"\" Merge edges which are close together, and collect vertices/edges which should be removed/added. \"\"\"\n for section_id in self.sections:\n utils.print_progress(len(self.sections), prefix='merging edges')\n new_edges, redundant_vertices = self.merge_edges(self.sections[section_id], maximum_distance,\n maximum_angle_delta, greedy)\n vertices_to_remove.extend(redundant_vertices)\n edges_to_add.extend(new_edges)\n # Maintain the section list\n self.sections[section_id] = list(filter(lambda v: v not in redundant_vertices, self.sections[section_id]))\n\n \"\"\" Add the new edges and edge weights into the graph. \"\"\"\n for edge, weight in edges_to_add:\n utils.print_progress(len(edges_to_add), prefix='adding edges')\n new_edge = self.graph.add_edge(edge[0], edge[1], add_missing=False)\n self.edge_weights[new_edge] = weight\n\n \"\"\" Removing vertices reindexes the vertices and edges of the graph. Need to maintain external data \n structures to prevent data corruption. \"\"\"\n original_indices = self.graph.vertex_index.copy() # Property map will correct for reindexing\n self.graph.remove_vertex(vertices_to_remove, fast=True)\n # Vertices have now been reindexed. 
Update each section with the new vertex IDs.\n for section_id in self.sections:\n utils.print_progress(len(self.sections), prefix='reindexing vertices')\n self.sections[section_id] = [find_vertex(self.graph, original_indices, v)[0] for v in\n self.sections[section_id]]\n\n return self.graph.num_vertices()", "def split_next(self):\n # Consider the node with the highest loss reduction (a.k.a. gain)\n node = heappop(self.splittable_nodes)\n\n tic = time()\n (sample_indices_left,\n sample_indices_right,\n right_child_pos) = self.splitter.split_indices(node.split_info,\n node.sample_indices)\n self.total_apply_split_time += time() - tic\n\n depth = node.depth + 1\n n_leaf_nodes = len(self.finalized_leaves) + len(self.splittable_nodes)\n n_leaf_nodes += 2\n\n left_child_node = TreeNode(depth,\n sample_indices_left,\n node.split_info.sum_gradient_left,\n node.split_info.sum_hessian_left,\n parent=node)\n right_child_node = TreeNode(depth,\n sample_indices_right,\n node.split_info.sum_gradient_right,\n node.split_info.sum_hessian_right,\n parent=node)\n left_child_node.sibling = right_child_node\n right_child_node.sibling = left_child_node\n node.right_child = right_child_node\n node.left_child = left_child_node\n\n # set start and stop indices\n left_child_node.partition_start = node.partition_start\n left_child_node.partition_stop = node.partition_start + right_child_pos\n right_child_node.partition_start = left_child_node.partition_stop\n right_child_node.partition_stop = node.partition_stop\n\n self.n_nodes += 2\n\n if self.max_depth is not None and depth == self.max_depth:\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n return left_child_node, right_child_node\n\n if (self.max_leaf_nodes is not None\n and n_leaf_nodes == self.max_leaf_nodes):\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n self._finalize_splittable_nodes()\n return left_child_node, right_child_node\n\n if left_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(left_child_node)\n if right_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(right_child_node)\n\n # Compute histograms of childs, and compute their best possible split\n # (if needed)\n should_split_left = left_child_node.value is None # node isn't a leaf\n should_split_right = right_child_node.value is None\n if should_split_left or should_split_right:\n\n # We will compute the histograms of both nodes even if one of them\n # is a leaf, since computing the second histogram is very cheap\n # (using histogram subtraction).\n n_samples_left = left_child_node.sample_indices.shape[0]\n n_samples_right = right_child_node.sample_indices.shape[0]\n if n_samples_left < n_samples_right:\n smallest_child = left_child_node\n largest_child = right_child_node\n else:\n smallest_child = right_child_node\n largest_child = left_child_node\n\n # We use the brute O(n_samples) method on the child that has the\n # smallest number of samples, and the subtraction trick O(n_bins)\n # on the other one.\n tic = time()\n smallest_child.histograms = \\\n self.histogram_builder.compute_histograms_brute(\n smallest_child.sample_indices)\n largest_child.histograms = \\\n self.histogram_builder.compute_histograms_subtraction(\n node.histograms, smallest_child.histograms)\n self.total_compute_hist_time += time() - tic\n\n tic = time()\n if should_split_left:\n self._compute_best_split_and_push(left_child_node)\n if should_split_right:\n self._compute_best_split_and_push(right_child_node)\n 
self.total_find_split_time += time() - tic\n\n return left_child_node, right_child_node", "def spectral_modularity_partition(G):\n try:\n import numpy as np\n except:\n raise ImportError(\"spectral_partition() \\\n requires NumPy: http://scipy.org/\")\n\n\n k = np.matrix(G.degree().values())\n m = G.number_of_edges()\n B = nx.adj_matrix(G) - (k.transpose() * k) / (2.0 * m)\n eigenvalues, eigenvectors = np.linalg.eig(B)\n # sort and keep smallest nonzero \n index = np.argsort(eigenvalues)[-1] # -1 index is largest eigenvalue\n v2 = zip(np.real(eigenvectors[:, index]), G)\n \n C = [set(), set()]\n \n for (u, n) in v2:\n if u < 0:\n C[0].add(n)\n else:\n C[1].add(n)\n return C", "def compute_perm(parents):\n # Function written by M. Defferrard, taken verbatim, from \n # https://github.com/mdeff/cnn_graph/blob/master/lib/coarsening.py#L167\n\n # Order of last layer is random (chosen by the clustering algorithm).\n indices = []\n if len(parents) > 0:\n M_last = max(parents[-1]) + 1\n indices.append(list(range(M_last)))\n\n for parent in parents[::-1]:\n #print('parent: {}'.format(parent))\n\n # Fake nodes go after real ones.\n pool_singeltons = len(parent)\n\n indices_layer = []\n for i in indices[-1]:\n indices_node = list(np.where(parent == i)[0])\n assert 0 <= len(indices_node) <= 2\n #print('indices_node: {}'.format(indices_node))\n\n # Add a node to go with a singelton.\n if len(indices_node) == 1:\n indices_node.append(pool_singeltons)\n pool_singeltons += 1\n #print('new singelton: {}'.format(indices_node))\n # Add two nodes as children of a singelton in the parent.\n elif len(indices_node) == 0:\n indices_node.append(pool_singeltons+0)\n indices_node.append(pool_singeltons+1)\n pool_singeltons += 2\n #print('singelton childrens: {}'.format(indices_node))\n\n indices_layer.extend(indices_node)\n indices.append(indices_layer)\n\n # Sanity checks.\n for i,indices_layer in enumerate(indices):\n M = M_last*2**i\n # Reduction by 2 at each layer (binary tree).\n assert len(indices[0] == M)\n # The new ordering does not omit an indice.\n assert sorted(indices_layer) == list(range(M))\n\n return indices[::-1]", "def split_in_half(keys_56bits):\n left_keys, right_keys = keys_56bits[:28], keys_56bits[28:]\n return left_keys, right_keys", "def lap_split_n(img, n):\n levels = []\n\n print(\"inside lap_split_n function \")\n\n for i in range(n):\n img, hi = lap_split(img)\n levels.append(hi)\n levels.append(img)\n return levels[::-1]", "def find_components(edges, max_components=16):\n # Perform increasingly aggressive dilation until there are just a few\n # connected components.\n\n count = 21\n dilation = 5\n n = 1\n while count > 16:\n n += 1\n dilated_image = dilate(edges, N=3, iterations=n)\n dilated_image = np.uint8(dilated_image)\n contours, hierarchy = cv2.findContours(dilated_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n count = len(contours)\n # print dilation\n # Image.fromarray(edges).show()\n # Image.fromarray(255 * dilated_image).show()\n return contours", "def make_heuristic_list():\n heuristics = [lambda n_p: (-n_p[0], (n_p[1] >> 16), (n_p[1] & 0xffff)),\n lambda n_p: (-n_p[0], (n_p[1] & 0xffff), (n_p[1] >> 16)),\n lambda n_p: ((n_p[1] >> 16), -n_p[0], (n_p[1] & 0xffff)),\n lambda n_p: ((n_p[1] & 0xffff), -n_p[0], (n_p[1] >> 16)),\n lambda n_p: ((n_p[1] >> 16), (n_p[1] & 0xffff)),\n lambda n_p: ((n_p[1] & 0xffff), (n_p[1] >> 16))]\n samples = 50\n for ij in range(samples):\n for ratio in range(5):\n split = ij / float(samples - 1)\n heuristics.append(make_angle_heuristic(split, 1.0 
- split, ratio))\n return heuristics", "def yieldNGEpairs(array):\n stack = container.Stack()\n \n def lessthan(element):\n \"\"\"prevent the top (> element) loss\"\"\"\n def predicate(top):\n if top < element:\n return True\n stack.push(top)\n return False\n return predicate\n \n for element in array:\n if stack.isempty() or stack.top() > element:\n stack.push(element)\n continue\n \n for top in itertools.takewhile(lessthan(element), stack.popall()):\n yield top, element\n stack.push(element)\n\n for top in stack.popall():\n yield top, None", "def thin_edges(magnitude, angle, low):\n # define footprints for the angle cases (1, 2, 3 and 4)\n t1 = time.time()\n f1 = np.array([[1, 0, 1]])\n f2 = np.array([[1, 0, 0], [0, 0, 0], [0, 0, 1]]) # hier had ik f4 verwacht, ma de hoeken kloppen niet\n f3 = np.array([[1], [0], [1]])\n f4 = np.array([[0, 0, 1], [0, 0, 0], [1, 0, 0]]) # hier had ik f2 verwacht\n # define conditions\n t2 = time.time()\n cond1 = magnitude >= ndimage.maximum_filter(magnitude, footprint=f1, mode='constant', cval=-np.inf) # True/False matrix\n cond2 = magnitude >= ndimage.maximum_filter(magnitude, footprint=f2, mode='constant', cval=-np.inf) # True/False matrix\n cond3 = magnitude >= ndimage.maximum_filter(magnitude, footprint=f3, mode='constant', cval=-np.inf) # True/False matrix\n cond4 = magnitude >= ndimage.maximum_filter(magnitude, footprint=f4, mode='constant', cval=-np.inf) # True/False matrixcond1 = magnitude >= ndimage.maximum_filter(magnitude, footprint=f1, mode='constant', cval=-np.inf) # True/False matrix\n t3 = time.time()\n pos_ang = np.where(angle < 0, angle + 180, angle) # make the negative angles positive, works for this application\n t4 = time.time()\n # transform the angle matrix to a matrix of 1/0, indicating wether the element is the highest along its gradient\n ang_to_bool = np.where(pos_ang <= 22.5, cond1, np.where(pos_ang <= 67.5, cond2, np.where(pos_ang <= 112.5, cond3, np.where(\n pos_ang <= 157.5, cond4, np.where(pos_ang > 157.5, cond1, pos_ang)))))\n t5 = time.time()\n filtered = np.where(magnitude > low, ang_to_bool, 0) # keep only the bools of the values higher than the low th\n t6 = time.time()\n remasked = np.where(filtered, magnitude, 0) # og waardes er weer over trekken\n t7 = time.time()\n\n global timed\n if timed:\n print(\"EDGE THINNING:\")\n print(f\"Defining footprints: {t2-t1}s\")\n print(f\"Defining conditions: {t3-t2}s\")\n print(f\"Calculating pos_ang: {t4-t3}s\")\n print(f\"Calculating ang_to_bool: {t5-t4}s\")\n print(f\"Filtering: {t6-t5}s\")\n print(f\"Remasking: {t7-t6}s\")\n print(\"-----------------------------------\")\n print(f\"TOTAL TIME: {t7-t1}s\\n\")\n\n return remasked", "def permuteEdgeTypes(self):\n\t\tpermuted_graph = copy.copy(self)\n\t\t# swap about half the edges\n\t\ti = len(self.graph)/2\n\t\twhile i > 0:\n\t\t\t# swap \n\t\t\tsourceA, targetA = random.choice(permuted_graph.graph.keys())\n\t\t\tiTypeA, emA = permuted_graph.graph[(sourceA, targetA)]\n\t\t\tsourceB, targetB = random.choice(permuted_graph.graph.keys())\n\t\t\tiTypeB, emB = permuted_graph.graph[(sourceB, targetB)]\n\t\t\tpermuted_graph.graph[(sourceA, targetA)] = (iTypeB, emB)\n\t\t\tpermuted_graph.graph[(sourceB, targetB)] = (iTypeA, emA)\n\n\t\t\ti -= 1\n\n\t\t# return a new graph object\t\t\n\t\treturn permuted_graph", "def part_2():\n return itertools.permutations(range(5, 10))", "def better_partition(graph, part1, part2, independent_set_extraction_strategy):\n\n # TODO: When there are more hyperplanes it often chooses the resulting 
partition\n # TODO: as best even though it results in more colors (e.g. for DSJC 125.5)\n\n if part2 is None or len(part2) == 0:\n return True\n\n if part1 is None or len(part1) == 0:\n return False\n\n # Remove colors from one endpoint of each illegal edge in each partition.\n nodes_to_delete1 = nodes_to_delete(graph, part1, strategy=independent_set_extraction_strategy)\n nodes_to_color1 = {n for n in graph.nodes() if n not in nodes_to_delete1}\n nr_of_colors1 = len(set(part1.values()))\n\n nodes_to_delete2 = nodes_to_delete(graph, part2, strategy=independent_set_extraction_strategy)\n nodes_to_color2 = {n for n in graph.nodes() if n not in nodes_to_delete2}\n nr_of_colors2 = len(set(part2.values()))\n\n avg1 = float(len(nodes_to_color1)) / nr_of_colors1\n avg2 = float(len(nodes_to_color2)) / nr_of_colors2\n\n return avg1 > avg2", "def get_splits(ntot, nper):\n beglist = numpy.arange(0,ntot,nper)\n endlist = numpy.arange(0,ntot,nper) + nper - 1\n\n if (ntot % nper) != 0:\n endlist[-1] = ntot-1\n return beglist, endlist", "def get_chunks_ranges(\n total: int, *, chunk_size: int = None, parts: int = None\n) -> List[Tuple[int, int]]:\n\n assert (chunk_size is not None) ^ (\n parts is not None\n ), \"Exactly one of chunk_size or parts must be provided\"\n\n if chunk_size is not None:\n if chunk_size >= total:\n return [(0, total)]\n\n steps = np.arange(0, total, chunk_size, dtype=np.int64)\n ans = list(zip(steps[:-1], steps[1:]))\n if ans[-1][-1] < total:\n ans.append((ans[-1][-1], total))\n ans[-1] = (ans[-1][0], min(ans[-1][-1], total))\n return ans\n\n elif parts is not None:\n chunk_size = np.ceil(total / parts)\n return get_chunks_ranges(total, chunk_size=chunk_size)\n\n assert False, \"should not reach here\"" ]
[ "0.5971374", "0.58695173", "0.5826751", "0.5813039", "0.57311904", "0.57143676", "0.5701675", "0.5663653", "0.5646929", "0.56423926", "0.5626546", "0.560861", "0.55968225", "0.55719924", "0.55631095", "0.5513152", "0.5477863", "0.5472609", "0.5460661", "0.5448054", "0.54350835", "0.5401846", "0.5401286", "0.5398355", "0.53969014", "0.5380748", "0.5369611", "0.53599274", "0.5349893", "0.5346726", "0.5344109", "0.53417933", "0.53286374", "0.5317908", "0.5310529", "0.53001493", "0.5299976", "0.52976304", "0.5296043", "0.52775013", "0.5275794", "0.52678597", "0.52559817", "0.5255538", "0.5254548", "0.52493834", "0.52346385", "0.5230867", "0.52267426", "0.52165663", "0.52130586", "0.5211984", "0.5206215", "0.5197048", "0.5197047", "0.51908654", "0.5187691", "0.5158726", "0.51562643", "0.5155674", "0.51555175", "0.51522887", "0.5150516", "0.51493776", "0.5131376", "0.51244926", "0.51183575", "0.5114966", "0.510342", "0.51010185", "0.50990856", "0.509582", "0.50830734", "0.5079452", "0.50777704", "0.5073708", "0.50692725", "0.50661457", "0.5063878", "0.50546414", "0.5053603", "0.50531346", "0.5047782", "0.50462395", "0.50438863", "0.50437796", "0.5039888", "0.5029317", "0.502522", "0.5025062", "0.5024311", "0.5023587", "0.50202817", "0.50171256", "0.50171185", "0.50161546", "0.50110126", "0.50095224", "0.50075597", "0.50008214" ]
0.5720235
5
phase5 requires a 4-edge combo where none of the edges are in the z-plane. phase4 will put a 4-edge combo into that state. There are 12!/(4!8!) or 495 different 4-edge combinations. Try them all and see which one has the lowest phase4 cost.
def find_first_four_edges_to_pair(self): original_state = self.state[:] original_solution = self.solution[:] original_solution_len = len(self.solution) results = [] for wing_str_index, wing_str_combo in enumerate(itertools.combinations(wing_strs_all, 4)): wing_str_combo = sorted(wing_str_combo) self.state = original_state[:] self.solution = original_solution[:] self.lt_phase4.wing_strs = wing_str_combo if self.lt_phase4.solve(): phase4_solution = self.solution[original_solution_len:] phase4_solution_len = len(phase4_solution) results.append((phase4_solution_len, wing_str_combo)) logger.debug( f"{wing_str_index+1}/495 {wing_str_combo} phase-4 solution length is {phase4_solution_len}" ) else: logger.debug(f"{wing_str_index+1}/495 {wing_str_combo} phase-4 solution length is >= 4 ") self.lt_phase4.fh_txt_cache = {} self.state = original_state[:] self.solution = original_solution[:] results.sort() return results
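A minimal sketch (not part of the dataset record above): it illustrates the enumerate-and-score pattern the query describes, confirming the 12!/(4!8!) = 495 count and picking the cheapest 4-edge combination. The wing labels and the phase4_cost callable below are placeholder assumptions, not the solver's actual objects.

import itertools
from math import comb

# Placeholder wing labels; the real solver defines its own wing_strs_all.
wing_strs_all = ["UB", "UL", "UR", "UF", "LB", "LF", "RB", "RF", "DB", "DL", "DR", "DF"]

assert comb(12, 4) == 495  # 12!/(4!8!) four-edge combinations

def cheapest_combo(phase4_cost):
    # phase4_cost: callable returning the phase-4 solution length for a 4-edge combo.
    results = sorted(
        (phase4_cost(combo), combo)
        for combo in itertools.combinations(wing_strs_all, 4)
    )
    return results[0]  # (lowest phase-4 cost, its 4-edge combination)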
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def phase_5(self):\n test_board_1 = board(5, 5, [1, 1], [0, 0])\n render = Render_engine('terminal', test_board_1)\n\n render.render_terminal(test_board_1)", "def cost_function_SO4(params: list):\n cost = 0\n SO4 = SO4_circuit(params[0], params[1], params[2], params[3], params[4], params[5])\n\n for i in range(4):\n for j in range(4):\n cost += abs(SO4[i][j] - U[i][j])\n\n # identity_goal = SO4 @ np.linalg.inv(U)\n # for i in range(4):\n # for j in range(4):\n # cost += abs(identity_goal[i][j] - I4[i][j])\n\n return cost", "def rk4_singleStep(odes, state, parameters, dt):\n k1 = dt * odes(state, parameters)\n k2 = dt * odes(state + 0.5 * k1, parameters)\n k3 = dt * odes(state + 0.5 * k2, parameters)\n k4 = dt * odes(state + k3, parameters)\n return state + (k1 + 2 * k2 + 2 * k3 + k4) / 6", "def ramp5p(params, phase, args=dict(n=5, guess=[1, -0.32, 2, -0.08, 2])):\n # 2013-12-07 14:08 IJMC: Created.\n\n return params[0] * (1. + np.exp(-params[1]*phase + params[2]) + \\\n np.exp(-params[3]*phase + params[4]))", "def ex_4pdeer(param): \r\n param = _parsargs(param,npar=1) \r\n \r\n # Dipolar pathways\r\n lam = param[0]\r\n pathways = [\r\n [1-lam],\r\n [lam, 0]\r\n ]\r\n return pathways", "def test_4_2_5D_cube_splits(self):\n check = [(0, 0, 0, 0, 0), (1, 1, 1, 1, 1), (1, 0, 0, 0, 0),\n (1, 1, 0, 0, 0), (1, 1, 1, 0, 0), (1, 1, 1, 1, 0),\n (1, 1, 1, 0, 1), (1, 1, 0, 1, 0), (1, 1, 0, 1, 1),\n (1, 1, 0, 0, 1), (1, 0, 1, 0, 0), (1, 0, 1, 1, 0),\n (1, 0, 1, 1, 1), (1, 0, 1, 0, 1), (1, 0, 0, 1, 0),\n (1, 0, 0, 1, 1), (1, 0, 0, 0, 1), (0, 1, 0, 0, 0),\n (0, 1, 1, 0, 0), (0, 1, 1, 1, 0), (0, 1, 1, 1, 1),\n (0, 1, 1, 0, 1), (0, 1, 0, 1, 0), (0, 1, 0, 1, 1),\n (0, 1, 0, 0, 1), (0, 0, 1, 0, 0), (0, 0, 1, 1, 0),\n (0, 0, 1, 1, 1), (0, 0, 1, 0, 1), (0, 0, 0, 1, 0),\n (0, 0, 0, 1, 1), (0, 0, 0, 0, 1), (0.5, 0.5, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5, 0.5), (0.0, 0.0, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.0, 0.5, 0.5), (0.0, 0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.0, 0.5, 0.0), (0.0, 0.0, 0.5, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.0, 0.0), (0.0, 0.0, 0.5, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.5, 0.5), (0.0, 0.5, 0.0, 0.0, 0.5),\n (0.0, 0.5, 0.0, 0.0, 0.0), (0.0, 0.5, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.5, 0.0, 0.5), (0.0, 0.5, 0.5, 0.0, 0.0),\n (0.0, 0.5, 0.5, 0.5, 0.0), (0.5, 0.0, 0.5, 0.5, 0.5),\n (0.5, 0.0, 0.0, 0.5, 0.5), (0.5, 0.0, 0.0, 0.0, 0.5),\n (0.5, 0.0, 0.0, 0.0, 0.0), (0.5, 0.0, 0.0, 0.5, 0.0),\n (0.5, 0.0, 0.5, 0.0, 0.5), (0.5, 0.0, 0.5, 0.0, 0.0),\n (0.5, 0.0, 0.5, 0.5, 0.0), (0.5, 0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.0, 0.5), (0.5, 0.5, 0.0, 0.0, 0.0),\n (0.5, 0.5, 0.0, 0.5, 0.0), (0.5, 0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.5, 0.0, 0.0), (0.5, 0.5, 0.5, 0.5, 0.0),\n (0.25, 0.25, 0.25, 0.25, 0.25), (1.0, 0.5, 0.5, 0.5, 0.5),\n (1.0, 1.0, 0.5, 0.5, 0.5), (1.0, 1.0, 1.0, 0.5, 0.5),\n (1.0, 1.0, 1.0, 1.0, 0.5), (1.0, 1.0, 1.0, 0.5, 1.0),\n (1.0, 1.0, 0.5, 1.0, 0.5), (1.0, 1.0, 0.5, 1.0, 1.0),\n (1.0, 1.0, 0.5, 0.5, 1.0), (1.0, 0.5, 1.0, 0.5, 0.5),\n (1.0, 0.5, 1.0, 1.0, 0.5), (1.0, 0.5, 1.0, 1.0, 1.0),\n (1.0, 0.5, 1.0, 0.5, 1.0), (1.0, 0.5, 0.5, 1.0, 0.5),\n (1.0, 0.5, 0.5, 1.0, 1.0), (1.0, 0.5, 0.5, 0.5, 1.0),\n (0.5, 1.0, 0.5, 0.5, 0.5), (0.5, 1.0, 1.0, 0.5, 0.5),\n (0.5, 1.0, 1.0, 1.0, 0.5), (0.5, 1.0, 1.0, 1.0, 1.0),\n (0.5, 1.0, 1.0, 0.5, 1.0), (0.5, 1.0, 0.5, 1.0, 0.5),\n (0.5, 1.0, 0.5, 1.0, 1.0), (0.5, 1.0, 0.5, 0.5, 1.0),\n (0.5, 0.5, 1.0, 0.5, 0.5), (0.5, 0.5, 1.0, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0, 1.0), (0.5, 0.5, 1.0, 0.5, 1.0),\n (0.5, 0.5, 0.5, 1.0, 0.5), (0.5, 0.5, 0.5, 1.0, 1.0),\n (0.5, 0.5, 0.5, 0.5, 
1.0), (0.75, 0.75, 0.75, 0.75, 0.75),\n (1.0, 0.0, 0.5, 0.5, 0.5), (1.0, 0.0, 0.0, 0.5, 0.5),\n (1.0, 0.0, 0.0, 0.0, 0.5), (1.0, 0.0, 0.0, 0.5, 0.0),\n (1.0, 0.0, 0.5, 0.0, 0.5), (1.0, 0.0, 0.5, 0.0, 0.0),\n (1.0, 0.0, 0.5, 0.5, 0.0), (1.0, 0.5, 0.0, 0.5, 0.5),\n (1.0, 0.5, 0.0, 0.0, 0.5), (1.0, 0.5, 0.0, 0.0, 0.0),\n (1.0, 0.5, 0.0, 0.5, 0.0), (1.0, 0.5, 0.5, 0.0, 0.5),\n (1.0, 0.5, 0.5, 0.0, 0.0), (1.0, 0.5, 0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25, 0.25, 0.25), (1.0, 1.0, 0.0, 0.5, 0.5),\n (1.0, 1.0, 0.0, 0.0, 0.5), (1.0, 1.0, 0.0, 0.5, 0.0),\n (1.0, 1.0, 0.5, 0.0, 0.5), (1.0, 1.0, 0.5, 0.0, 0.0),\n (1.0, 1.0, 0.5, 0.5, 0.0), (0.5, 1.0, 0.0, 0.5, 0.5),\n (0.5, 1.0, 0.0, 0.0, 0.5), (0.5, 1.0, 0.0, 0.0, 0.0),\n (0.5, 1.0, 0.0, 0.5, 0.0), (0.5, 1.0, 0.5, 0.0, 0.5),\n (0.5, 1.0, 0.5, 0.0, 0.0), (0.5, 1.0, 0.5, 0.5, 0.0),\n (0.75, 0.75, 0.25, 0.25, 0.25), (1.0, 1.0, 1.0, 0.0, 0.5),\n (1.0, 1.0, 1.0, 0.5, 0.0), (1.0, 0.5, 1.0, 0.0, 0.5),\n (1.0, 0.5, 1.0, 0.0, 0.0), (1.0, 0.5, 1.0, 0.5, 0.0),\n (0.5, 1.0, 1.0, 0.0, 0.5), (0.5, 1.0, 1.0, 0.0, 0.0),\n (0.5, 1.0, 1.0, 0.5, 0.0), (0.5, 0.5, 1.0, 0.0, 0.5),\n (0.5, 0.5, 1.0, 0.0, 0.0), (0.5, 0.5, 1.0, 0.5, 0.0),\n (0.75, 0.75, 0.75, 0.25, 0.25), (1.0, 1.0, 0.5, 1.0, 0.0),\n (1.0, 0.5, 1.0, 1.0, 0.0), (1.0, 0.5, 0.5, 1.0, 0.0),\n (0.5, 1.0, 1.0, 1.0, 0.0), (0.5, 1.0, 0.5, 1.0, 0.0),\n (0.5, 0.5, 1.0, 1.0, 0.0), (0.5, 0.5, 0.5, 1.0, 0.0),\n (0.75, 0.75, 0.75, 0.75, 0.25), (1.0, 1.0, 0.5, 0.0, 1.0),\n (1.0, 0.5, 1.0, 0.0, 1.0), (1.0, 0.5, 0.5, 0.0, 1.0),\n (0.5, 1.0, 1.0, 0.0, 1.0), (0.5, 1.0, 0.5, 0.0, 1.0),\n (0.5, 0.5, 1.0, 0.0, 1.0), (0.5, 0.5, 0.5, 0.0, 1.0),\n (0.75, 0.75, 0.75, 0.25, 0.75), (1.0, 1.0, 0.0, 1.0, 0.5),\n (1.0, 0.5, 0.0, 1.0, 0.5), (1.0, 0.5, 0.0, 1.0, 0.0),\n (0.5, 1.0, 0.0, 1.0, 0.5), (0.5, 1.0, 0.0, 1.0, 0.0),\n (0.5, 0.5, 0.0, 1.0, 0.5), (0.5, 0.5, 0.0, 1.0, 0.0),\n (0.75, 0.75, 0.25, 0.75, 0.25), (1.0, 1.0, 0.0, 0.5, 1.0),\n (1.0, 0.5, 0.0, 1.0, 1.0), (1.0, 0.5, 0.0, 0.5, 1.0),\n (0.5, 1.0, 0.0, 1.0, 1.0), (0.5, 1.0, 0.0, 0.5, 1.0),\n (0.5, 0.5, 0.0, 1.0, 1.0), (0.5, 0.5, 0.0, 0.5, 1.0),\n (0.75, 0.75, 0.25, 0.75, 0.75), (1.0, 0.5, 0.0, 0.0, 1.0),\n (0.5, 1.0, 0.0, 0.0, 1.0), (0.5, 0.5, 0.0, 0.0, 1.0),\n (0.75, 0.75, 0.25, 0.25, 0.75), (1.0, 0.0, 1.0, 0.5, 0.5),\n (1.0, 0.0, 1.0, 0.0, 0.5), (1.0, 0.0, 1.0, 0.5, 0.0),\n (0.5, 0.0, 1.0, 0.5, 0.5), (0.5, 0.0, 1.0, 0.0, 0.5),\n (0.5, 0.0, 1.0, 0.0, 0.0), (0.5, 0.0, 1.0, 0.5, 0.0),\n (0.75, 0.25, 0.75, 0.25, 0.25), (1.0, 0.0, 1.0, 1.0, 0.5),\n (1.0, 0.0, 0.5, 1.0, 0.5), (1.0, 0.0, 0.5, 1.0, 0.0),\n (0.5, 0.0, 1.0, 1.0, 0.5), (0.5, 0.0, 1.0, 1.0, 0.0),\n (0.5, 0.0, 0.5, 1.0, 0.5), (0.5, 0.0, 0.5, 1.0, 0.0),\n (0.75, 0.25, 0.75, 0.75, 0.25), (1.0, 0.0, 1.0, 0.5, 1.0),\n (1.0, 0.0, 0.5, 1.0, 1.0), (1.0, 0.0, 0.5, 0.5, 1.0),\n (0.5, 0.0, 1.0, 1.0, 1.0), (0.5, 0.0, 1.0, 0.5, 1.0),\n (0.5, 0.0, 0.5, 1.0, 1.0), (0.5, 0.0, 0.5, 0.5, 1.0),\n (0.75, 0.25, 0.75, 0.75, 0.75), (1.0, 0.0, 0.5, 0.0, 1.0),\n (0.5, 0.0, 1.0, 0.0, 1.0), (0.5, 0.0, 0.5, 0.0, 1.0),\n (0.75, 0.25, 0.75, 0.25, 0.75), (1.0, 0.0, 0.0, 1.0, 0.5),\n (0.5, 0.0, 0.0, 1.0, 0.5), (0.5, 0.0, 0.0, 1.0, 0.0),\n (0.75, 0.25, 0.25, 0.75, 0.25), (1.0, 0.0, 0.0, 0.5, 1.0),\n (0.5, 0.0, 0.0, 1.0, 1.0), (0.5, 0.0, 0.0, 0.5, 1.0),\n (0.75, 0.25, 0.25, 0.75, 0.75), (0.5, 0.0, 0.0, 0.0, 1.0),\n (0.75, 0.25, 0.25, 0.25, 0.75), (0.0, 1.0, 0.5, 0.5, 0.5),\n (0.0, 1.0, 0.0, 0.5, 0.5), (0.0, 1.0, 0.0, 0.0, 0.5),\n (0.0, 1.0, 0.0, 0.5, 0.0), (0.0, 1.0, 0.5, 0.0, 0.5),\n (0.0, 1.0, 0.5, 0.0, 0.0), (0.0, 1.0, 0.5, 0.5, 0.0),\n (0.25, 0.75, 0.25, 
0.25, 0.25), (0.0, 1.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 1.0, 0.0, 0.5), (0.0, 1.0, 1.0, 0.5, 0.0),\n (0.0, 0.5, 1.0, 0.5, 0.5), (0.0, 0.5, 1.0, 0.0, 0.5),\n (0.0, 0.5, 1.0, 0.0, 0.0), (0.0, 0.5, 1.0, 0.5, 0.0),\n (0.25, 0.75, 0.75, 0.25, 0.25), (0.0, 1.0, 1.0, 1.0, 0.5),\n (0.0, 1.0, 0.5, 1.0, 0.5), (0.0, 1.0, 0.5, 1.0, 0.0),\n (0.0, 0.5, 1.0, 1.0, 0.5), (0.0, 0.5, 1.0, 1.0, 0.0),\n (0.0, 0.5, 0.5, 1.0, 0.5), (0.0, 0.5, 0.5, 1.0, 0.0),\n (0.25, 0.75, 0.75, 0.75, 0.25), (0.0, 1.0, 1.0, 0.5, 1.0),\n (0.0, 1.0, 0.5, 1.0, 1.0), (0.0, 1.0, 0.5, 0.5, 1.0),\n (0.0, 0.5, 1.0, 1.0, 1.0), (0.0, 0.5, 1.0, 0.5, 1.0),\n (0.0, 0.5, 0.5, 1.0, 1.0), (0.0, 0.5, 0.5, 0.5, 1.0),\n (0.25, 0.75, 0.75, 0.75, 0.75), (0.0, 1.0, 0.5, 0.0, 1.0),\n (0.0, 0.5, 1.0, 0.0, 1.0), (0.0, 0.5, 0.5, 0.0, 1.0),\n (0.25, 0.75, 0.75, 0.25, 0.75), (0.0, 1.0, 0.0, 1.0, 0.5),\n (0.0, 0.5, 0.0, 1.0, 0.5), (0.0, 0.5, 0.0, 1.0, 0.0),\n (0.25, 0.75, 0.25, 0.75, 0.25), (0.0, 1.0, 0.0, 0.5, 1.0),\n (0.0, 0.5, 0.0, 1.0, 1.0), (0.0, 0.5, 0.0, 0.5, 1.0),\n (0.25, 0.75, 0.25, 0.75, 0.75), (0.0, 0.5, 0.0, 0.0, 1.0),\n (0.25, 0.75, 0.25, 0.25, 0.75), (0.0, 0.0, 1.0, 0.5, 0.5),\n (0.0, 0.0, 1.0, 0.0, 0.5), (0.0, 0.0, 1.0, 0.5, 0.0),\n (0.25, 0.25, 0.75, 0.25, 0.25), (0.0, 0.0, 1.0, 1.0, 0.5),\n (0.0, 0.0, 0.5, 1.0, 0.5), (0.0, 0.0, 0.5, 1.0, 0.0),\n (0.25, 0.25, 0.75, 0.75, 0.25), (0.0, 0.0, 1.0, 0.5, 1.0),\n (0.0, 0.0, 0.5, 1.0, 1.0), (0.0, 0.0, 0.5, 0.5, 1.0),\n (0.25, 0.25, 0.75, 0.75, 0.75), (0.0, 0.0, 0.5, 0.0, 1.0),\n (0.25, 0.25, 0.75, 0.25, 0.75), (0.0, 0.0, 0.0, 1.0, 0.5),\n (0.25, 0.25, 0.25, 0.75, 0.25), (0.0, 0.0, 0.0, 0.5, 1.0),\n (0.25, 0.25, 0.25, 0.75, 0.75), (0.25, 0.25, 0.25, 0.25, 0.75)]\n\n nn_checks = {(1, 1, 1, 1, 1): [(1.0, 1.0, 1.0, 0.5, 1.0),\n (1.0, 1.0, 0.5, 1.0, 1.0),\n (1.0, 0.5, 0.5, 0.5, 0.5),\n (1.0, 0.5, 1.0, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0, 1.0),\n (1.0, 0.5, 0.5, 1.0, 0.5),\n (1.0, 0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5, 1.0),\n (0.5, 0.5, 1.0, 0.5, 1.0),\n (0.5, 0.5, 0.5, 1.0, 1.0),\n (0.5, 1.0, 0.5, 0.5, 0.5),\n (0.5, 1.0, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 0.5, 0.5),\n (0.5, 1.0, 0.5, 1.0, 0.5),\n (1.0, 1.0, 1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5, 1.0, 0.5),\n (1.0, 1.0, 0.5, 0.5, 0.5),\n (1.0, 1.0, 1.0, 1.0, 0.5),\n (1.0, 1.0, 0.5, 0.5, 1.0),\n (1.0, 0.5, 1.0, 0.5, 1.0),\n (1.0, 0.5, 0.5, 1.0, 1.0),\n (0.5, 1.0, 0.5, 0.5, 1.0),\n (0.5, 0.5, 1.0, 0.5, 0.5),\n (1.0, 0.5, 0.5, 0.5, 1.0),\n (1.0, 0.5, 1.0, 1.0, 1.0),\n (0.5, 0.5, 0.5, 1.0, 0.5),\n (0.75, 0.75, 0.75, 0.75, 0.75),\n (0.5, 0.5, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 1.0, 1.0),\n (0.5, 1.0, 1.0, 0.5, 1.0),\n (0.5, 1.0, 0.5, 1.0, 1.0)],\n (0.25, 0.75, 0.75, 0.75, 0.25): [(0.5, 1.0, 1.0, 1.0, 0.0),\n (0.5, 0.5, 0.5, 1.0, 0.5),\n (0, 1, 1, 1, 0),\n (0.5, 1.0, 0.5, 0.5, 0.5),\n (0.5, 1.0, 1.0, 1.0, 0.5),\n (0.0, 1.0, 0.5, 0.5, 0.5),\n (0.0, 1.0, 1.0, 1.0, 0.5),\n (0.5, 1.0, 0.5, 1.0, 0.5),\n (0.0, 1.0, 0.5, 1.0, 0.5),\n (0.5, 1.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 1.0, 0.5, 0.0),\n (0.0, 1.0, 0.5, 1.0, 0.0),\n (0.5, 1.0, 1.0, 0.5, 0.0),\n (0.5, 1.0, 0.5, 1.0, 0.0),\n (0.5, 1.0, 0.5, 0.5, 0.0),\n (0.0, 1.0, 0.5, 0.5, 0.0),\n (0.5, 0.5, 1.0, 0.5, 0.0),\n (0.5, 0.5, 0.5, 1.0, 0.0),\n (0.0, 0.5, 1.0, 0.5, 0.5),\n (0.0, 0.5, 0.5, 1.0, 0.5),\n (0.0, 0.5, 1.0, 0.5, 0.0),\n (0.0, 0.5, 1.0, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0, 0.0),\n (0.0, 0.5, 0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5, 0.0),\n (0.0, 0.5, 0.5, 1.0, 0.0),\n (0.0, 0.5, 0.5, 0.5, 0.0),\n (0.0, 0.5, 1.0, 1.0, 0.0),\n (0.5, 0.5, 0.5, 0.5, 0.5),\n (0.5, 0.5, 1.0, 1.0, 0.5),\n (\n 0.5, 0.5, 
1.0, 0.5, 0.5)],\n (0.0, 0.0, 1.0, 0.5, 1.0): [(0.5, 0.0, 0.5, 0.5, 1.0),\n (0.0, 0.5, 0.5, 0.5, 1.0),\n (0.5, 0.5, 0.5, 0.5, 1.0),\n (0.0, 0.0, 0.5, 0.5, 1.0),\n (0, 0, 1, 1, 1),\n (0.5, 0.5, 1.0, 0.5, 1.0),\n (0.5, 0.0, 1.0, 0.5, 1.0),\n (0.0, 0.5, 1.0, 0.5, 1.0),\n (0, 0, 1, 0, 1),\n (0.5, 0.0, 1.0, 0.5, 0.5),\n (0.0, 0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 1.0, 0.5, 0.5),\n (0.0, 0.0, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.5, 0.5, 0.5),\n (0.25, 0.25, 0.75, 0.75, 0.75),\n (0.5, 0.0, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5, 0.5), (\n 0.25, 0.25, 0.75, 0.25, 0.75)]}\n\n init_triangulation(5, 1, check, nn_checks)", "def make_state_appliable_4ch(state):\n size = len(state)\n st_appl = np.zeros((size,)*4, dtype=complex)\n for p1 in range(size):\n for p2 in range(size):\n for p3 in range(size):\n for p4 in range(size):\n st_appl[p1, p2, p3, p4] = state[p1, p2, p3, p4] * sqrt(factorial(p1) * factorial(p2) * factorial(p3) * factorial(p4))\n return st_appl", "def refugia_adj_5_full_2_iter5 (params, ns):\n #33 parameters \n nu1x, nuA, nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m0_12, m0_21, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T0, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T0 (initial split; the definition of this time epoch differentiates this model from refugia_adj_5_simsplit_4epochs)\n nu_T0 = [nu1x, nuA]\n mig0 = numpy.array([[0, m0_12],[m0_21, 0]])\n fs.integrate(nu_T0, T0, m=mig0)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def ramp5n(params, phase, args=dict(n=5, guess=[1., 20, 83, 8.1, -0.1])): #-0.16, 4.4, -0.16, 0.43])):\n # 2013-12-07 14:08 IJMC: Created.\n\n return params[0] * (1. 
- np.exp(-params[1]*phase + params[2]) - \\\n np.exp(-params[3]*phase + params[4]))", "def fkine_ur5(q):\n \n \n T1 = dh(0.08916, +q[0], 0.0, +pi/2)\n T2 = dh( 0.0, +q[1], -0.425, 0.0)\n T3 = dh( 0.0, +q[2], -0.392, 0.0)\n T4 = dh(0.10915, +q[3], 0.0, +pi/2)\n T5 = dh(0.09465, +pi+q[4], 0.0, +pi/2)\n T6 = dh( 0.0823, +pi+q[5], 0.0, 0.0)\n \n # Efector final con respecto a la base\n T = np.dot(np.dot(np.dot(np.dot(np.dot(T1,T2),T3),T4),T5),T6)\n return T", "def phosphorene_4band():\n a = 0.222\n ax = 0.438\n ay = 0.332\n theta = 96.79 * (pi / 180)\n phi = 103.69 * (pi / 180)\n\n lat = pb.Lattice(a1=[ax, 0], a2=[0, ay])\n\n h = a * sin(phi - pi / 2)\n s = 0.5 * ax - a * cos(theta / 2)\n lat.add_sublattices(\n ('A', [-s/2, -ay/2, h], 0),\n ('B', [ s/2, -ay/2, 0], 0),\n ('C', [-s/2 + ax/2, 0, 0], 0),\n ('D', [ s/2 + ax/2, 0, h], 0)\n )\n\n lat.register_hopping_energies({\n 't1': -1.22,\n 't2': 3.665,\n 't3': -0.205,\n 't4': -0.105,\n 't5': -0.055\n })\n\n lat.add_hoppings(\n # t1\n ([-1, 0], 'A', 'D', 't1'),\n ([-1, -1], 'A', 'D', 't1'),\n ([ 0, 0], 'B', 'C', 't1'),\n ([ 0, -1], 'B', 'C', 't1'),\n # t2\n ([ 0, 0], 'A', 'B', 't2'),\n ([ 0, 0], 'C', 'D', 't2'),\n # t3\n ([ 0, 0], 'A', 'D', 't3'),\n ([ 0, -1], 'A', 'D', 't3'),\n ([ 1, 1], 'C', 'B', 't3'),\n ([ 1, 0], 'C', 'B', 't3'),\n # t4\n ([ 0, 0], 'A', 'C', 't4'),\n ([ 0, -1], 'A', 'C', 't4'),\n ([-1, 0], 'A', 'C', 't4'),\n ([-1, -1], 'A', 'C', 't4'),\n ([ 0, 0], 'B', 'D', 't4'),\n ([ 0, -1], 'B', 'D', 't4'),\n ([-1, 0], 'B', 'D', 't4'),\n ([-1, -1], 'B', 'D', 't4'),\n # t5\n ([-1, 0], 'A', 'B', 't5'),\n ([-1, 0], 'C', 'D', 't5')\n )\n\n return lat", "def test_4_1_5D_cube_init(self):\n check = [(0, 0, 0, 0, 0), (1, 1, 1, 1, 1), (1, 0, 0, 0, 0),\n (1, 1, 0, 0, 0),\n (1, 1, 1, 0, 0), (1, 1, 1, 1, 0), (1, 1, 1, 0, 1),\n (1, 1, 0, 1, 0),\n (1, 1, 0, 1, 1), (1, 1, 0, 0, 1), (1, 0, 1, 0, 0),\n (1, 0, 1, 1, 0),\n (1, 0, 1, 1, 1), (1, 0, 1, 0, 1), (1, 0, 0, 1, 0),\n (1, 0, 0, 1, 1),\n (1, 0, 0, 0, 1), (0, 1, 0, 0, 0), (0, 1, 1, 0, 0),\n (0, 1, 1, 1, 0),\n (0, 1, 1, 1, 1), (0, 1, 1, 0, 1), (0, 1, 0, 1, 0),\n (0, 1, 0, 1, 1),\n (0, 1, 0, 0, 1), (0, 0, 1, 0, 0), (0, 0, 1, 1, 0),\n (0, 0, 1, 1, 1),\n (0, 0, 1, 0, 1), (0, 0, 0, 1, 0), (0, 0, 0, 1, 1),\n (0, 0, 0, 0, 1),\n (0.5, 0.5, 0.5, 0.5, 0.5)]\n\n nn_checks = {(0, 1, 0, 1, 1): [(0, 0, 0, 0, 0), (\n 0.5, 0.5, 0.5, 0.5, 0.5), (0, 0, 0, 1, 1), (1, 1, 0, 1, 1),\n (0, 1, 0, 0, 0),\n (0, 1, 0, 0, 1),\n (0, 1, 0, 1, 0),\n (0, 0, 0, 0, 1),\n (1, 1, 1, 1, 1),\n (0, 1, 1, 1, 1),\n (0, 0, 0, 1, 0)]}\n\n init_triangulation(5, 0, check, nn_checks)", "def ramp4n(params, phase, args=dict(n=5, guess=[1, -3.7e-4, -0.94, 0.087, -1.08])):\n # 2013-12-07 14:08 IJMC: Created.\n\n return params[0] * (1. 
- np.exp(-params[1]*phase + params[2]) + \\\n params[3] * (phase - 0.5) + \\\n params[4] * (phase - 0.5)**2)", "def step5(self):\n\t\tself.j = self.k\n\t\tif self.b[self.k] == 'e':\n\t\t\ta = self.m()\n\t\t\tif a > 1 or (a == 1 and not self.cvc(self.k-1)):\n\t\t\t\tself.k = self.k - 1\n\t\tif self.b[self.k] == 'l' and self.doublec(self.k) and self.m() > 1:\n\t\t\tself.k = self.k -1", "def refugia_adj_5_full_2_iter4 (params, ns):\n #33 parameters \n nu1x, nuA, nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m0_12, m0_21, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T0, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T0 (initial split; the definition of this time epoch differentiates this model from refugia_adj_5_simsplit_4epochs)\n nu_T0 = [nu1x, nuA]\n mig0 = numpy.array([[0, m0_12],[m0_21, 0]])\n fs.integrate(nu_T0, T0, m=mig0)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def calculate_vn6_over_vn4(vn_data_array, outputFileName):\n vn_data_array = array(vn_data_array)\n nev = len(vn_data_array[:, 0])\n dN = real(vn_data_array[:, 0])\n Q1 = dN*vn_data_array[:, 1]\n Q2 = dN*vn_data_array[:, 2]\n Q3 = dN*vn_data_array[:, 3]\n Q4 = dN*vn_data_array[:, 4]\n Q5 = dN*vn_data_array[:, 5]\n Q6 = dN*vn_data_array[:, 6]\n\n # two-particle correlation\n N2_weight = dN*(dN - 1.)\n Q2_2 = abs(Q2)**2. - dN\n\n # four-particle correlation\n N4_weight = dN*(dN - 1.)*(dN - 2.)*(dN - 3.)\n Q2_4 = ((abs(Q2)**4.) - 2.*real(Q4*conj(Q2)*conj(Q2))\n - 4.*(dN - 2.)*(abs(Q2)**2.) + abs(Q4)**2.\n + 2*dN*(dN - 3.))\n\n # six-particle correlation\n N6_weight = dN*(dN - 1.)*(dN - 2.)*(dN - 3.)*(dN - 4.)*(dN - 5.)\n Q2_6 = (abs(Q2)**6. + 9*(abs(Q4)**2.)*(abs(Q2)**2.)\n - 6.*real(Q4*Q2*conj(Q2)*conj(Q2)*conj(Q2))\n + 4.*real(Q6*conj(Q2)*conj(Q2)*conj(Q2))\n - 12.*real(Q6*conj(Q4)*conj(Q2))\n + 18.*(dN - 4.)*real(Q4*conj(Q2)*conj(Q2))\n + 4.*(abs(Q6)**2.)\n - 9.*(dN - 4.)*((abs(Q2)**4.) 
+ (abs(Q4)**2.))\n + 18.*(dN - 5.)*(dN - 2.)*(abs(Q2)**2.)\n - 6.*dN*(dN - 4.)*(dN - 5.))\n\n # calcualte observables with Jackknife resampling method\n r2_array = zeros(nev)\n gamma1_array = zeros(nev)\n for iev in range(nev):\n array_idx = [True]*nev\n array_idx[iev] = False\n array_idx = array(array_idx)\n\n # C_n{4}\n C_2_2 = mean(Q2_2[array_idx])/mean(N2_weight[array_idx])\n C_2_4 = (mean(Q2_4[array_idx])/mean(N4_weight[array_idx])\n - 2.*(C_2_2**2.))\n C_2_6 = (mean(Q2_6[array_idx])/mean(N6_weight[array_idx])\n - 9.*C_2_2*mean(Q2_4[array_idx])/mean(N4_weight[array_idx])\n + 12.*(C_2_2**3.))\n if C_2_6 > 0. and C_2_4 < 0. and C_2_2 > 0.:\n v2_2 = sqrt(C_2_2)\n v2_6 = (C_2_6/4.)**(1./6.)\n v2_4 = (-C_2_4)**(1./4.)\n r2_array[iev] = v2_6/v2_4\n gamma1_array[iev] = (-6.*sqrt(2)*(v2_4**2.)*(v2_4 - v2_6)\n /(v2_2**2. - v2_4**2.)**(1.5))\n\n r2_mean = mean(r2_array)\n r2_err = sqrt((nev - 1.)/nev*sum((r2_array - r2_mean)**2.))\n gamma1_mean = mean(gamma1_array)\n gamma1_err = sqrt((nev - 1.)/nev*sum((gamma1_array - gamma1_mean)**2.))\n\n f = open(outputFileName, 'w')\n f.write(\n \"# n vn{6}/vn{4} (vn{6}/vn{4})_err gamma_1 gamma_1_err\\n\")\n f.write(\"%d %.10e %.10e %.10e %.10e\\n\"\n % (2, r2_mean, r2_err, gamma1_mean, gamma1_err))\n f.close()\n return", "def refugia_adj_5_simsplit_4epochs_iter5 (params, ns):\n #28 parameters \n nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2]) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def test_dphase(self):\n model = BDF(debug=False)\n node1, c1, t1 = 100, 3, 0.3\n node2, c2, t2 = 101, 4, 0.4\n sid = 42\n card_lines = ['DPHASE', sid, node1, c1, t1, node2, c2, t2]\n model.add_card(card_lines, card_lines[0], comment='', is_list=True,\n has_none=True)\n model.add_grid(100, [0., 0., 0.])\n model.add_grid(101, [0., 0., 0.])\n model.validate()\n model.cross_reference()\n #print(model.dphases[42])\n save_load_deck(model)", "def smooth5(size: int) -> int:\n if size < 6:\n return size\n if not size % 2:\n return size\n\n new = np.inf\n power5 = 1\n while power5 < size:\n power35 = power5\n while power35 < size:\n power2 = 2 ** ((-int(-size // power35) - 1).bit_length())\n n = power2 * power35\n if n == size:\n return new\n elif n < new:\n new = n\n power35 *= 3\n if power35 == size:\n return new\n if power35 < new:\n new = power35\n power5 *= 5\n if power5 == size:\n return new\n if power5 < new:\n new = power5\n return new", 
"def ex_ridme5(param): \r\n param = _parsargs(param, npar=6) \r\n\r\n # Dipolar pathways\r\n lam = param.copy()\r\n pathways = [[] for _ in lam]\r\n pathways[0] = [lam[0]]\r\n pathways[1] = [lam[1], 0, 1]\r\n pathways[2] = [lam[2], 0, 2]\r\n pathways[3] = [lam[3], 0, 3]\r\n pathways[4] = [lam[4], 0, 4]\r\n pathways[5] = [lam[5], 0, 5]\r\n return pathways", "def prob4(d = 500): \n #import the plane data\n planeData = np.load(\"plane.npy\")\n \n tplane = planeData[:,0]\n alpha = np.deg2rad(planeData[:,1])\n beta = np.deg2rad(planeData[:,2])\n \n l = len(tplane)\n \n #define x and y functions\n def x(n):\n# Gives x position\n return d * np.tan(beta[n]) / (np.tan(beta[n]) - np.tan(alpha[n]))\n def y(n):\n# Gives y position\n return d * np.tan(beta[n]) * np.tan(alpha[n]) / (np.tan(beta[n]) - np.tan(alpha[n]))\n \n #define x and y prime as we will see them\n def xprime(n):\n# Gives the approximate derivative of x\n if n == 0:\n return fdq1(x, n, h = 1)\n elif n == l-1:\n return bdq1(x, n, h = 1)\n elif n > 0 and n < l:\n return cdq2(x, n, h = 1)\n else:\n return 0\n \n def yprime(n):\n# Gives the approximate derivative of y\n if n == 0:\n return fdq1(y, n, h = 1)\n elif n == l-1:\n return bdq1(y, n, h = 1)\n elif n > 0 and n < l:\n return cdq2(y, n, h = 1)\n else:\n return 0\n \n #define speed from x and y prime\n def speed(n):\n# print(\"speed(n) where n = \" + str(n))\n return np.sqrt((xprime(n))**2 + (yprime(n))**2)\n \n #Finally get the speed from the information we have\n spd = []\n X = []\n Y = []\n for i in range(0, l):\n spd.append(speed(i))\n X.append(x(i))\n Y.append(y(i))\n \n return spd\n \n raise NotImplementedError(\"Problem 4 Incomplete\")", "def ramp4p(params, phase, args=dict(n=5, guess=[1, -0.068, 2.33, 0.933, -20.5])):\n # 2013-12-07 14:08 IJMC: Created.\n\n return params[0] * (1. 
+ np.exp(-params[1]*phase + params[2]) + \\\n params[3] * (phase - 0.5) + \\\n params[4] * (phase - 0.5)**2)", "def eo_edges(self):\n logger.info(\"eo_edges called\")\n permutations = []\n original_state = self.state[:]\n original_solution = self.solution[:]\n tmp_solution_len = len(self.solution)\n\n # Build a list of the wing strings at each midge\n wing_strs = []\n\n for _, square_index, partner_index in midges_recolor_tuples_555:\n square_value = self.state[square_index]\n partner_value = self.state[partner_index]\n wing_str = square_value + partner_value\n wing_str = wing_str_map[square_value + partner_value]\n wing_strs.append(wing_str)\n\n # build a list of all possible EO permutations...an even number of edges must be high\n for num in range(4096):\n num = str(bin(num)).lstrip(\"0b\").zfill(12)\n if num.count(\"1\") % 2 == 0:\n permutations.append(list(map(int, num)))\n\n # Put all 2048 starting states in a file and point ida-via-graph\n # at the file so it can solve all of them and apply the one that is the shortest.\n lr_center_stage_states = []\n eo_outer_orbit_states = []\n eo_inner_orbit_states = []\n\n for permutation in permutations:\n must_be_uppercase = []\n must_be_lowercase = []\n self.state = original_state[:]\n\n for wing_str, uppercase in zip(wing_strs, permutation):\n if uppercase:\n must_be_uppercase.append(wing_str)\n else:\n must_be_lowercase.append(wing_str)\n\n # logger.info(\"%s: %s permutation %s\" % (self, index, \"\".join(map(str, permutation))))\n self.edges_flip_orientation(must_be_uppercase, must_be_lowercase)\n\n # build lists of the states that we need to find state_indexes for\n lr_center_stage_states.append(self.lt_phase3_lr_center_stage.state())\n eo_outer_orbit_states.append(self.lt_phase3_eo_outer_orbit.state())\n eo_inner_orbit_states.append(self.lt_phase3_eo_inner_orbit.state())\n\n # now we have a huge list of states to lookup, do a binary search on multiple states at once (this is drastically faster\n # than binary searching for them individually). state_index_multiple() will return a dict where the state is the key\n # and the state_index is the value.\n lr_center_stage_eo_inner_orbit_state_indexes = self.lt_phase3_lr_center_stage.state_index_multiple(\n lr_center_stage_states\n )\n eo_outer_orbit_state_indexes = self.lt_phase3_eo_outer_orbit.state_index_multiple(eo_outer_orbit_states)\n eo_inner_orbit_state_indexes = self.lt_phase3_eo_inner_orbit.state_index_multiple(eo_inner_orbit_states)\n\n # build a list of tuples of the state indexes\n pt_state_indexes = []\n for lr_center_stage_eo_inner_orbit_state, eo_outer_orbit_state, eo_inner_orbit_state in zip(\n lr_center_stage_states, eo_outer_orbit_states, eo_inner_orbit_states\n ):\n pt_state_indexes.append(\n (\n lr_center_stage_eo_inner_orbit_state_indexes[lr_center_stage_eo_inner_orbit_state],\n eo_outer_orbit_state_indexes[eo_outer_orbit_state],\n eo_inner_orbit_state_indexes[eo_inner_orbit_state],\n )\n )\n\n self.state = original_state[:]\n self.solution = original_solution[:]\n\n # When solve_via_c is passed pt_state_indexes (2048 lines of states in this case), it will try all 2048 of them\n # to find the state that has the shortest solution.\n self.lt_phase3.solve_via_c(pt_states=pt_state_indexes)\n\n self.print_cube_add_comment(\"edges EOed into high/low groups\", tmp_solution_len)\n self.post_eo_state = self.state[:]\n self.post_eo_solution = self.solution[:]\n\n # re-color the cube so that the edges are oriented correctly so we can\n # pair 4-edges then 8-edges. 
After all edge pairing is done we will uncolor\n # the cube and re-apply the solution.\n self.edges_flip_orientation(wing_strs, [])\n self.highlow_edges_print()", "def makeTAPE5(self):\n\n wn1, wn2 = self.wnLims\n\n # loop through each HITRAN molecule and create an associated TAPE5\n allT5 = []\n for iMol, mol in enumerate(self.mols):\n base = os.path.basename(mol)\n print(base)\n tape5 = 'TAPE5_%s' % base\n\n # LNFL TAPE5 records \n # (see lnfl_instructions document in LNFL release)\n rec1 = '$ %s' % base\n rec2 = '%10.3f%10.3f' % (wn1-25, wn2+25)\n\n # start off with all molecules off, then turn iMol on, then \n # generate a single string instead of a list of characters\n # and append \n rec3 = ['0'] * self.nMols\n rec3[iMol] = '1'\n rec3 = ''.join(rec3) + ' NBLK1 NOCPL LNOUT '\n end = '%%%%%'\n\n outDat = [rec1, rec2]\n\n # line coupling molecules\n if base in ['02_CO2', '06_CH4', '07_O2']:\n rec3 = rec3.replace('NOCPL', 'MRG2')\n rec4 = [' '] * self.nMols\n rec4[iMol] = '1'\n rec4 = ''.join(rec4)\n outDat += [rec3, rec4]\n else:\n outDat.append(rec3)\n # endif coupling\n\n outDat.append(end)\n\n # now write TAPE5\n outFP = open(tape5, 'w')\n for line in outDat: outFP.write('%s\\n' % line)\n outFP.close()\n\n # copy TAPE5 to subdirectory for molecule in buildDir\n target = '%s/%s' % (self.dirT5, tape5)\n if os.path.exists(target):\n print('WARNING: overwriting %s' % target)\n # endif target check\n os.rename(tape5, target)\n\n allT5.append(target)\n # end molecule loop\n\n self.allT5 = list(allT5)\n return self", "def processPhaseHeight(self, phasesInRing1, phasesInRing2):\n P11, P12, P21, P22 = ([] for i in range(4))\n phaseHeightDictionary = {}\n\n [P11.append(index+1)for index, value in enumerate(self.phaseDurationList)\n if value > 0.0 and index < 2]\n [P12.append(index+1)for index, value in enumerate(self.phaseDurationList)\n if value > 0.0 and index >= 2 and index < 4]\n [P21.append(index+1)for index, value in enumerate(self.phaseDurationList)\n if value > 0.0 and index >= 4 and index < 6]\n [P22.append(index+1)for index, value in enumerate(self.phaseDurationList)\n if value > 0.0 and index >= 6 and index < 8]\n\n if (len(P11) == len(P21)):\n for index in range(len(P11)):\n if len(P11) > 0:\n phaseHeightDictionary[str(P11[index])] = 10\n\n for index in range(len(P21)):\n if len(P21) > 0:\n phaseHeightDictionary[str(P21[index])] = 10\n\n elif (len(P11) < len(P21)):\n for index in range(len(P11)):\n if len(P11) > 0:\n phaseHeightDictionary[str(P11[index])] = 20\n\n for index in range(len(P21)):\n if len(P21) > 0:\n phaseHeightDictionary[str(P21[index])] = 10\n\n elif (len(P11) > len(P21)):\n for index in range(len(P11)):\n if len(P11) > 0:\n phaseHeightDictionary[str(P11[index])] = 10\n\n for index in range(len(P21)):\n if len(P21) > 0:\n phaseHeightDictionary[str(P21[index])] = 20\n\n if (len(P12) == len(P22)):\n for index in range(len(P12)):\n if len(P12) > 0:\n phaseHeightDictionary[str(P12[index])] = 10\n\n for index in range(len(P22)):\n if len(P22) > 0:\n phaseHeightDictionary[str(P22[index])] = 10\n\n elif (len(P12) < len(P22)):\n for index in range(len(P12)):\n if len(P12) > 0:\n phaseHeightDictionary[str(P12[index])] = 20\n for index in range(len(P22)):\n if len(P22) > 0:\n phaseHeightDictionary[str(P22[index])] = 10\n\n elif (len(P12) > len(P22)):\n for index in range(len(P12)):\n if len(P12) > 0:\n phaseHeightDictionary[str(P12[index])] = 10\n for index in range(len(P22)):\n if len(P22) > 0:\n phaseHeightDictionary[str(P22[index])] = 20\n\n for phase in phasesInRing1:\n 
for key, value in phaseHeightDictionary.items():\n if int(key) == phase:\n self.phaseHeightInRing1.append(value)\n\n for phase in phasesInRing2:\n for key, value in phaseHeightDictionary.items():\n if int(key) == phase:\n self.phaseHeightInRing2.append(value)", "def step4(self):\n\t\tif self.b[self.k - 1] == 'a':\n\t\t\tif self.ends(\"al\"): pass\n\t\t\telse: return\n\t\telif self.b[self.k - 1] == 'c':\n\t\t\tif self.ends(\"ance\"): pass\n\t\t\telif self.ends(\"ence\"): pass\n\t\t\telse: return\n\t\telif self.b[self.k - 1] == 'e':\n\t\t\tif self.ends(\"er\"): pass\n\t\t\telse: return\n\t\telif self.b[self.k - 1] == 'i':\n\t\t\tif self.ends(\"ic\"): pass\n\t\t\telse: return\n\t\telif self.b[self.k - 1] == 'l':\n\t\t\tif self.ends(\"able\"): pass\n\t\t\telif self.ends(\"ible\"): pass\n\t\t\telse: return\n\t\telif self.b[self.k - 1] == 'n':\n\t\t\tif self.ends(\"ant\"): pass\n\t\t\telif self.ends(\"ement\"): pass\n\t\t\telif self.ends(\"ment\"): pass\n\t\t\telif self.ends(\"ent\"): pass\n\t\t\telse: return\n\t\telif self.b[self.k - 1] == 'o':\n\t\t\tif self.ends(\"ion\") and (self.b[self.j] == 's' or self.b[self.j] == 't'): pass\n\t\t\telif self.ends(\"ou\"): pass\n\t\t\t# takes care of -ous\n\t\t\telse: return\n\t\telif self.b[self.k - 1] == 's':\n\t\t\tif self.ends(\"ism\"): pass\n\t\t\telse: return\n\t\telif self.b[self.k - 1] == 't':\n\t\t\tif self.ends(\"ate\"): pass\n\t\t\telif self.ends(\"iti\"): pass\n\t\t\telse: return\n\t\telif self.b[self.k - 1] == 'u':\n\t\t\tif self.ends(\"ous\"): pass\n\t\t\telse: return\n\t\telif self.b[self.k - 1] == 'v':\n\t\t\tif self.ends(\"ive\"): pass\n\t\t\telse: return\n\t\telif self.b[self.k - 1] == 'z':\n\t\t\tif self.ends(\"ize\"): pass\n\t\t\telse: return\n\t\telse:\n\t\t\treturn\n\t\tif self.m() > 1:\n\t\t\tself.k = self.j", "def rk5(accel,m,r,h,v):\n k1v = accel(m,r)\n k1r = v\n k2v = accel(m,r + 0.25*k1r*h)\n k2r = v + (0.25*k1v)*h\n k3v = accel(m,r + (3/32.*k1r + 9/32.*k2r)*h)\n k3r = v + (3/32.*k1v + 9/32.*k2v)*h\n k4v = accel(m,r + (1932/2197.*k1r - 7200/2197.*k2r + 7296/2197.*k3r)*h)\n k4r = v + (1932/2197.*k1v - 7200/2197.*k2v + 7296/2197.*k3v)*h\n k5v = accel(m,r + (439/216.*k1r - 8*k2r + 3680/513.*k3r - 845/4104.*k4r)*h)\n k5r = v + (439/216.*k1v - 8*k2v + 3680/513.*k3v - 845/4104.*k4v)*h\n k6v = accel(m,r - (8/27.*k1r + 2*k2r - 3544/2565.*k3r + 1859/4104.*k4r - 11/40.*k5r)*h)\n k6r = v - (8/27.*k1v + 2*k2v - 3544/2565.*k3v + 1859/4104.*k4v - 11/40.*k5v)*h\n\n # 5th order calculation\n new_v5 = v + h*(16/135.*k1v + 6656/12825.*k3v+28561/56430.*k4v - 9/50.*k5v + 2/55.*k6v) \n new_r5 = r + h*(16/135.*k1r + 6656/12825.*k3r+28561/56430.*k4r - 9/50.*k5r + 2/55.*k6r) \n \n return new_v5, new_r5", "def ikine_pose_ur5(xdes, dxdes, ddxdes, q0): \n k_p = 550;\n k_o = 150;\n k = np.diag([k_p, k_p, k_p, k_o, k_o, k_o, k_o])\n best_norm_e1 = 0.01\n best_norm_e2 = 0.01\n max_iter = 20\n delta = 0.001\n dq_p\t\t\t= np.zeros(6)\n\n q = copy(q0)\n for i in range(max_iter):\n T = fkine_ur5(q)\n e1 = xdes[0:3] - T[0:3,3]\n e2 = quatError(xdes[3:7], rot2quat(T[0:3,0:3]))\n e = np.concatenate((e1,e2), axis=0)\n de = -np.dot(k,e)\n J = jacobian_pose_ur5(q,delta)\n Jinv = np.linalg.pinv(J)\n dq = np.dot(Jinv, dxdes - de )\n q = q + delta*dq\n \n if (np.linalg.norm(e2) < best_norm_e2) & (np.linalg.norm(e1)< best_norm_e1):\n\n best_norm_e2 = np.linalg.norm(e2)\n best_norm_e1 = np.linalg.norm(e1)\n q_best = q\n dq_best = dq\n ddq_best \t\t= \t(dq_best - dq_p)/delta\n #ddq_best = np.dot(Jinv, ( ddxdes - np.dot(dJ,dq_best) ))\n print(\"iter: \", i)\n print(\"norma 
position: \",best_norm_e1)\n print(\"norma orientation: \",best_norm_e2)\n #print(\"---------\")\n\n \tdq_p \t= dq\n return q_best, dq_best, ddq_best", "def hash_flow(flow_5_tuple):\n ip_A = flow_5_tuple[0]\n ip_B = flow_5_tuple[1]\n tp_src = flow_5_tuple[2]\n tp_dst = flow_5_tuple[3]\n proto = flow_5_tuple[4]\n if proto == 6:\n #*** Is a TCP flow:\n if ip_A > ip_B:\n direction = 1\n elif ip_B > ip_A:\n direction = 2\n elif tp_src > tp_dst:\n direction = 1\n elif tp_dst > tp_src:\n direction = 2\n else:\n direction = 1\n else:\n #*** Isn't a flow, so arbitrarily set direction as 1:\n direction = 1\n if direction == 1:\n flow_tuple = (ip_A, ip_B, tp_src, tp_dst, proto)\n else:\n #*** Flip direction:\n flow_tuple = (ip_B, ip_A, tp_dst, tp_src, proto)\n return hash_tuple(flow_tuple)", "def endgame_score_connectfour(board, is_current_player_maximizer) :\n chains=sorted(board.get_all_chains(), key=lambda x: len(x))\n if len(chains[-1])>=4:\n if not is_current_player_maximizer:\n return 1000;\n else:\n return -1000;\n return 0;", "def endgame_score_connectfour_faster(board, is_current_player_maximizer) :\n chains=sorted(board.get_all_chains(), key=lambda x: len(x))\n if len(chains[-1])>=4:\n score = 1000+23*(42-board.count_pieces());\n if not is_current_player_maximizer:\n return score\n else:\n\n return -score\n return 0;", "def calculate_vn4(vn_data_array, outputFileName):\n vn_data_array = array(vn_data_array)\n nev = len(vn_data_array[:, 0])\n dN = real(vn_data_array[:, 0])\n Q1 = dN*vn_data_array[:, 1]\n Q2 = dN*vn_data_array[:, 2]\n Q3 = dN*vn_data_array[:, 3]\n Q4 = dN*vn_data_array[:, 4]\n Q5 = dN*vn_data_array[:, 5]\n Q6 = dN*vn_data_array[:, 6]\n\n # two-particle correlation\n N2_weight = dN*(dN - 1.)\n Q1_2 = abs(Q1)**2. - dN\n Q2_2 = abs(Q2)**2. - dN\n Q3_2 = abs(Q3)**2. - dN\n\n # four-particle correlation\n N4_weight = dN*(dN - 1.)*(dN - 2.)*(dN - 3.)\n Q1_4 = ((abs(Q1)**4.) - 2.*real(Q2*conj(Q1)*conj(Q1))\n - 4.*(dN - 2.)*(abs(Q1)**2.) + abs(Q2)**2.\n + 2*dN*(dN - 3.))\n Q2_4 = ((abs(Q2)**4.) - 2.*real(Q4*conj(Q2)*conj(Q2))\n - 4.*(dN - 2.)*(abs(Q2)**2.) + abs(Q4)**2.\n + 2*dN*(dN - 3.))\n Q3_4 = ((abs(Q3)**4.) - 2.*real(Q6*conj(Q3)*conj(Q3))\n - 4.*(dN - 2.)*(abs(Q3)**2.) 
+ abs(Q6)**2.\n + 2*dN*(dN - 3.))\n\n # calcualte observables with Jackknife resampling method\n C1_4_array = zeros(nev)\n C2_4_array = zeros(nev)\n C3_4_array = zeros(nev)\n for iev in range(nev):\n array_idx = [True]*nev\n array_idx[iev] = False\n array_idx = array(array_idx)\n\n # C_1{4}\n C1_4_array[iev] = (mean(Q1_4[array_idx])/mean(N4_weight[array_idx])\n - 2.*((mean(Q1_2[array_idx])\n /mean(N2_weight[array_idx]))**2.))\n # C_2{4}\n C2_4_array[iev] = (mean(Q2_4[array_idx])/mean(N4_weight[array_idx])\n - 2.*((mean(Q2_2[array_idx])\n /mean(N2_weight[array_idx]))**2.))\n # C_3{4}\n C3_4_array[iev] = (mean(Q3_4[array_idx])/mean(N4_weight[array_idx])\n - 2.*((mean(Q3_2[array_idx])\n /mean(N2_weight[array_idx]))**2.))\n C1_4_mean = mean(C1_4_array)\n C1_4_err = sqrt((nev - 1.)/nev*sum((C1_4_array - C1_4_mean)**2.))\n C2_4_mean = mean(C2_4_array)\n C2_4_err = sqrt((nev - 1.)/nev*sum((C2_4_array - C2_4_mean)**2.))\n C3_4_mean = mean(C3_4_array)\n C3_4_err = sqrt((nev - 1.)/nev*sum((C3_4_array - C3_4_mean)**2.))\n\n v1_4 = 0.0\n v1_4_err = 0.0\n if C1_4_mean < 0:\n v1_4 = (-C1_4_mean)**0.25\n v1_4_err = 0.25*((-C1_4_mean)**(-0.75))*C1_4_err\n\n v2_4 = 0.0\n v2_4_err = 0.0\n if C2_4_mean < 0:\n v2_4 = (-C2_4_mean)**0.25\n v2_4_err = 0.25*((-C2_4_mean)**(-0.75))*C2_4_err\n\n v3_4 = 0.0\n v3_4_err = 0.0\n if C3_4_mean < 0:\n v3_4 = (-C3_4_mean)**0.25\n v3_4_err = 0.25*((-C3_4_mean)**(-0.75))*C3_4_err\n\n results = [v1_4, v1_4_err, C1_4_mean, C1_4_err,\n v2_4, v2_4_err, C2_4_mean, C2_4_err,\n v3_4, v3_4_err, C3_4_mean, C3_4_err,]\n f = open(outputFileName, 'w')\n f.write(\"# n vn{4} vn{4}_err Cn{4} Cn{4}_err\\n\")\n for i in range(1, 4):\n f.write(\"%d %.10e %.10e %.10e %.10e\\n\"\n % (i, results[4*i-4], results[4*i-3],\n results[4*i-2], results[4*i-1]))\n f.close()\n return", "def refugia_adj_5_simsplit_4epochs_iter4 (params, ns):\n #28 parameters \n nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2]) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def test_path5():\n path = [(0, 0, 1)]\n path += [\n [('A', 3, 0)],\n (0, 1, 1),\n [('A', 2, 0)],\n (np.pi/2, 1, 1),\n [('B',3,0)],\n (0, 1, 1),\n [('B',2,0)],\n (np.pi/2, 1, 1),\n [('C',3,0)],\n (0, 1, 1),\n [('C',2,0)],\n (np.pi/2, 1, 1),\n [('D', 3, 0)],\n (0, 1, 1),\n #[('D',2,0)],\n (np.pi/2, 1, 1),\n ] * 4\n execute_path(path,True)", "def monolayer_4band():\n a = 0.222\n ax = 0.438\n ay = 0.332\n theta = 96.79 * (pi 
/ 180)\n phi = 103.69 * (pi / 180)\n\n lat = pb.Lattice(a1=[ax, 0], a2=[0, ay])\n\n h = a * sin(phi - pi / 2)\n s = 0.5 * ax - a * cos(theta / 2)\n lat.add_sublattices(\n ('A', [0, 0, h], 0),\n ('B', [s, 0, 0], 0),\n ('C', [ax/2, ay/2, 0], 0),\n ('D', [ax/2 + s, ay/2, h], 0)\n )\n\n lat.register_hopping_energies({\n 't1': -1.22,\n 't2': 3.665,\n 't3': -0.205,\n 't4': -0.105,\n 't5': -0.055\n })\n\n lat.add_hoppings(\n # t1\n ([-1, 0], 'A', 'D', 't1'),\n ([-1, -1], 'A', 'D', 't1'),\n ([ 0, 0], 'B', 'C', 't1'),\n ([ 0, -1], 'B', 'C', 't1'),\n # t2\n ([ 0, 0], 'A', 'B', 't2'),\n ([ 0, 0], 'C', 'D', 't2'),\n # t3\n ([ 0, 0], 'A', 'D', 't3'),\n ([ 0, -1], 'A', 'D', 't3'),\n ([ 1, 1], 'C', 'B', 't3'),\n ([ 1, 0], 'C', 'B', 't3'),\n # t4\n ([ 0, 0], 'A', 'C', 't4'),\n ([ 0, -1], 'A', 'C', 't4'),\n ([-1, 0], 'A', 'C', 't4'),\n ([-1, -1], 'A', 'C', 't4'),\n ([ 0, 0], 'B', 'D', 't4'),\n ([ 0, -1], 'B', 'D', 't4'),\n ([-1, 0], 'B', 'D', 't4'),\n ([-1, -1], 'B', 'D', 't4'),\n # t5\n ([-1, 0], 'A', 'B', 't5'),\n ([ 0, 1], 'A', 'B', 't5'),\n ([ 0, -1], 'A', 'B', 't5'),\n ([-1, 0], 'C', 'D', 't5'),\n ([ 0, 1], 'C', 'D', 't5'),\n ([ 0, -1], 'C', 'D', 't5'),\n )\n\n return lat", "def test_5():\n h = iotbx.pdb.input(source_info=None, lines=test_pdb_5).construct_hierarchy()\n asc = h.atom_selection_cache()\n ncs_inp = iotbx.ncs.input(\n hierarchy=h,\n params=ncs_pars.ncs_search)\n ncs_groups = ncs_inp.get_ncs_restraints_group_list()\n assert len(ncs_groups) == 1\n # group 1\n assert ncs_groups[0].master_iselection.all_eq(\n asc.selection(string = \"chain A\").iselection())\n g1_c = ncs_groups[0].copies\n assert len(g1_c)==1\n assert g1_c[0].iselection.all_eq(\n asc.selection(string = \"chain B\").iselection())", "def FitnessSkopt5D(inputParams):\n\n elecsusParams = baseParams.copy()\n\n paramDict = {'Bfield': inputParams[0], \"T\": inputParams[1], 'Btheta': np.deg2rad(inputParams[2]), 'Etheta': np.deg2rad(inputParams[3]), 'Bphi': np.deg2rad(inputParams[4])}\n\n # This is the full dictionary to use on ElecSus.\n elecsusParams.update(paramDict)\n\n # First generate the output transmission as before.\n inputE = np.array([np.cos(elecsusParams[\"Etheta\"]), np.sin(elecsusParams[\"Etheta\"]), 0])\n\n # Call ElecSus to obtain the output electric field.\n try:\n # There may at times be issues with ElecSus, such as when NaN is entered as a variable.\n [outputE] = elecsus.calculate(globalDetuning, inputE, elecsusParams, outputs = [\"E_out\"])\n except:\n print(\"There was an issue in ElecSus, so this iteration will return a figure of merit of 0. Here are the input parameters:\")\n print(\"Input parameters: \" + str(elecsusParams))\n print(\"Input field: \" + str(inputE))\n return 0.0\n \n # Use a Jones matrix to determine the electric field after the action of the second polariser. As this is a single filter, the two polarisers are crossed.\n polariserAngle = elecsusParams[\"Etheta\"] + np.pi/2\n\n # Define the Jones matrix. 
Though only explicitly defined for the x-y plane, we add the third dimension so that we can use all 3 dimensions of the output field.\n jonesMatrix = np.matrix([[np.cos(polariserAngle)**2, np.sin(polariserAngle)*np.cos(polariserAngle), 0],\n\t\t\t\t\t\t\t\t[np.sin(polariserAngle)*np.cos(polariserAngle), np.sin(polariserAngle)**2, 0],\n [0, 0, 1]])\n\n # Get the output from the filter and the polarisers.\n singleFilterOutputE = np.array(jonesMatrix * outputE)\n\n # Get the transmission.\n singleFilterTransmission = (singleFilterOutputE * singleFilterOutputE.conjugate()).sum(axis=0)\n\n ENBW = ((integrate(singleFilterTransmission, globalDetuning)/singleFilterTransmission.max().real)/1e3).real\n\n figureOfMerit = (singleFilterTransmission.max()/ENBW).real\n \n if np.isnan(figureOfMerit):\n # Usually occurs in the case of high temperatures and B fields, since the transmission is just a flat line.\n print(\"Figure of merit is NaN! Here are the input parameters:\")\n print(str(elecsusParams))\n return 0.0\n else:\n return -1.0 * figureOfMerit", "def test_3_2_4D_cube_splits(self):\n check = [(0, 0, 0, 0), (1, 1, 1, 1), (1, 0, 0, 0), (1, 1, 0, 0),\n (1, 1, 1, 0),\n (1, 1, 0, 1), (1, 0, 1, 0), (1, 0, 1, 1), (1, 0, 0, 1),\n (0, 1, 0, 0),\n (0, 1, 1, 0), (0, 1, 1, 1), (0, 1, 0, 1), (0, 0, 1, 0),\n (0, 0, 1, 1),\n (0, 0, 0, 1), (0.5, 0.5, 0.5, 0.5), (0.0, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.5, 0.5), (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.5), (0.0, 0.5, 0.0, 0.0),\n (0.0, 0.5, 0.5, 0.0),\n (0.5, 0.0, 0.5, 0.5), (0.5, 0.0, 0.0, 0.5),\n (0.5, 0.0, 0.0, 0.0),\n (0.5, 0.0, 0.5, 0.0), (0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.0, 0.0),\n (0.5, 0.5, 0.5, 0.0), (0.25, 0.25, 0.25, 0.25),\n (1.0, 0.5, 0.5, 0.5),\n (1.0, 1.0, 0.5, 0.5), (1.0, 1.0, 1.0, 0.5),\n (1.0, 1.0, 0.5, 1.0),\n (1.0, 0.5, 1.0, 0.5), (1.0, 0.5, 1.0, 1.0),\n (1.0, 0.5, 0.5, 1.0),\n (0.5, 1.0, 0.5, 0.5), (0.5, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 1.0),\n (0.5, 1.0, 0.5, 1.0), (0.5, 0.5, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0),\n (0.5, 0.5, 0.5, 1.0), (0.75, 0.75, 0.75, 0.75),\n (1.0, 0.0, 0.5, 0.5),\n (1.0, 0.0, 0.0, 0.5), (1.0, 0.0, 0.5, 0.0),\n (1.0, 0.5, 0.0, 0.5),\n (1.0, 0.5, 0.0, 0.0), (1.0, 0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25, 0.25),\n (1.0, 1.0, 0.0, 0.5), (1.0, 1.0, 0.5, 0.0),\n (0.5, 1.0, 0.0, 0.5),\n (0.5, 1.0, 0.0, 0.0), (0.5, 1.0, 0.5, 0.0),\n (0.75, 0.75, 0.25, 0.25),\n (1.0, 0.5, 1.0, 0.0), (0.5, 1.0, 1.0, 0.0),\n (0.5, 0.5, 1.0, 0.0),\n (0.75, 0.75, 0.75, 0.25), (1.0, 0.5, 0.0, 1.0),\n (0.5, 1.0, 0.0, 1.0),\n (0.5, 0.5, 0.0, 1.0), (0.75, 0.75, 0.25, 0.75),\n (1.0, 0.0, 1.0, 0.5),\n (0.5, 0.0, 1.0, 0.5), (0.5, 0.0, 1.0, 0.0),\n (0.75, 0.25, 0.75, 0.25),\n (1.0, 0.0, 0.5, 1.0), (0.5, 0.0, 1.0, 1.0),\n (0.5, 0.0, 0.5, 1.0),\n (0.75, 0.25, 0.75, 0.75), (0.5, 0.0, 0.0, 1.0),\n (0.75, 0.25, 0.25, 0.75), (0.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 0.0, 0.5),\n (0.0, 1.0, 0.5, 0.0), (0.25, 0.75, 0.25, 0.25),\n (0.0, 1.0, 1.0, 0.5),\n (0.0, 0.5, 1.0, 0.5), (0.0, 0.5, 1.0, 0.0),\n (0.25, 0.75, 0.75, 0.25),\n (0.0, 1.0, 0.5, 1.0), (0.0, 0.5, 1.0, 1.0),\n (0.0, 0.5, 0.5, 1.0),\n (0.25, 0.75, 0.75, 0.75), (0.0, 0.5, 0.0, 1.0),\n (0.25, 0.75, 0.25, 0.75), (0.0, 0.0, 1.0, 0.5),\n (0.25, 0.25, 0.75, 0.25), (0.0, 0.0, 0.5, 1.0),\n (0.25, 0.25, 0.75, 0.75), (0.25, 0.25, 0.25, 0.75)]\n\n nn_checks = {(0, 0, 0, 0): [(0.0, 0.5, 0.5, 0.0), (0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.5),\n (0.25, 0.25, 0.25, 0.25),\n (0.5, 0.0, 0.0, 0.5), (0.5, 0.0, 0.5, 0.0),\n (0.5, 0.5, 0.0, 0.0), (0.0, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.0), (0.0, 0.0, 0.0, 0.5),\n 
(0.0, 0.5, 0.5, 0.5), (0.0, 0.0, 0.5, 0.5),\n (0.0, 0.5, 0.0, 0.5), (0.5, 0.0, 0.0, 0.0),\n (0.5, 0.5, 0.5, 0.0)],\n (1.0, 1.0, 0.5, 0.5): [(1.0, 1.0, 0.5, 1.0), (1, 1, 0, 1),\n (1.0, 1.0, 1.0, 0.5),\n (1.0, 0.5, 0.5, 0.5), (1, 1, 1, 0),\n (1.0, 1.0, 0.5, 0.0),\n (1.0, 1.0, 0.0, 0.5), (1, 1, 0, 0),\n (1, 1, 1, 1), (0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5),\n (0.75, 0.75, 0.75, 0.75),\n (0.75, 0.75, 0.25, 0.25),\n (0.75, 0.75, 0.75, 0.25),\n (0.75, 0.75, 0.25, 0.75)],\n (0.25, 0.25, 0.25, 0.75): [(0.0, 0.0, 0.5, 1.0),\n (0.0, 0.5, 0.0, 1.0),\n (0.5, 0.0, 0.0, 1.0),\n (0.5, 0.5, 0.5, 1.0),\n (0, 0, 0, 1),\n (0.0, 0.5, 0.5, 1.0),\n (0.5, 0.0, 0.5, 1.0),\n (0.5, 0.5, 0.0, 1.0),\n (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.0, 0.5),\n (0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.5, 0.5),\n (0.5, 0.0, 0.0, 0.5)]}\n\n init_triangulation(4, 1, check, nn_checks)", "def app_phase(data_pupil,data_phase,oversize=4):\n return phaseangle(app_complex(data_pupil,data_phase,oversize))", "def parse_ao5_solves(result):\n solves = [\n result.value1,\n result.value2,\n result.value3,\n result.value4,\n result.value5,\n ]\n min_solve = min(solves)\n\n # Regulation 9f9 allows one DNF/DNS to count as the worst solve\n if min_solve in [DNS, DNF]:\n max_solve = min_solve\n complete_solves = solves.copy()\n complete_solves.pop(max_solve)\n min_solve = min(complete_solves)\n else:\n max_solve = max(solves)\n\n min_removed = False\n max_removed = False\n for index, solve in enumerate(solves):\n value = parse_value(solve, result.event.format)\n if solve == min_solve and not min_removed:\n solves[index] = f\"({value})\"\n min_removed = True\n elif solve == max_solve and not max_removed:\n solves[index] = f\"({value})\"\n max_removed = True\n else:\n solves[index] = value\n return {f\"value{index+1}\": solve for index, solve in enumerate(solves)}", "def q5(array):\n a = array[3]\n b = array[7]\n c = array[11]\n d = array[15]\n a1 = roll(a, 00) + roll(b, 00) + roll(b, 12) + roll(c, 12) + roll(d, 28) + roll(a, 28) + roll(b, 28)\n b1 = roll(b, 19) + roll(c, 19) + roll(d, 3) + roll(a, 3) + roll(b, 3) + roll(c, 7) + roll(d, 23) + roll(a,\n 23) + roll(\n b, 23) + roll(a, 15) + roll(b, 15) + roll(b, 27) + roll(c, 27) + roll(d, 11) + roll(a, 11) + roll(b, 11) + roll(\n d, 31) + roll(a, 31) + roll(b, 31)\n c1 = roll(c, 0) + roll(d, 16) + roll(a, 16) + roll(b, 16) + roll(a, 8) + roll(b, 8) + roll(b, 20) + roll(c,\n 20) + roll(\n d, 4) + roll(a, 4) + roll(b, 4) + roll(d, 24) + roll(a, 24) + roll(b, 24)\n d1 = roll(a, 8) + roll(b, 8) + roll(b, 20) + roll(c, 20) + roll(d, 4) + roll(a, 4) + roll(b, 4) + roll(d,\n 24) + roll(a,\n 24) + roll(\n b, 24)\n\n return array[0], array[1], array[2], a1, array[4], array[5], array[6], b1, array[8], array[9], array[10], c1, array[\n 12], array[13], array[14], d1", "def test_phaselines():\n g = Graph(from_list=[\n (1, 3, 1),\n (2, 4, 1),\n (2, 5, 1),\n (3, 5, 1),\n (4, 6, 1),\n (5, 6, 1),\n ])\n g.add_node(7)\n\n p = g.phase_lines()\n assert set(g.nodes()) == set(p.keys())\n expects = {1: 0, 2: 0, 7: 0, 3: 1, 4: 1, 5: 2, 6: 3}\n assert p == expects, (p, expects)", "def test_Q4_stiffness_6dof_plane_stress(self):\n # Problem definition\n element_points = np.array([[-0.1, -0.1], [0.7, -0.2], [0.9, 0.3], [0, 0.5]])\n material = FEMOL.materials.IsotropicMaterial(2, 0.3, 1.1)\n thickness = 0.1\n\n # create element instances\n plane_element = FEMOL.elements.Q4(element_points, N_dof=2)\n plate_element = FEMOL.elements.Q4(element_points, 
N_dof=6)\n\n # define the plane stiffness tensor\n C = material.plane_tensor(thickness)\n D = material.bending_tensor(thickness)\n G = material.shear_tensor(thickness)\n\n Ke_plane = plane_element.Ke(C)\n Ke_plate = plate_element.Ke(C, D, G)\n\n Ke_plate = FEMOL.test_utils.reshape_Ke_into_plane_stress(Ke_plate)\n\n self.assertTrue(np.allclose(Ke_plane, Ke_plate))", "def computeHand(i1, i2, i3, i4, i5):\n arr = [Card(i1), Card(i2), Card(i3), Card(i4), Card(i5)];\n\n flushCount = [0, 0, 0, 0];\n rankCount = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];\n\n hand = (1 << i1) + (1 << i2) + (1 << i3) + (1 << i4) + (1 << i5);\n\n cards = arr;\n\n for i in range(len(arr)):\n rankCount[arr[i].rank] += 1\n flushCount[arr[i].suit] += 1\n\n # find straight\n scount = 1 if rankCount[12] > 0 else 0; # for the wheel straight\n highestStraight = -1;\n for i in range(len(rankCount)) :\n if (rankCount[i] > 0) :\n scount += 1\n if (scount >= 5) :\n highestStraight = i\n else :\n scount = 0\n\n # find flush\n for i in range(len(flushCount)) :\n if (flushCount[i] >= 5) :\n if (highestStraight != -1) :\n # if its a flush and straight, must be a straight flush\n return Hand(STRAIGHT_FLUSH, [highestStraight], hand, cards)\n else :\n highest = 0\n kickers = []\n for j in range(len(rankCount)):\n if rankCount[j] > 0: \n highest = j\n kickers.append(j)\n return Hand(FLUSH, [highest], hand, cards, kickers[::-1]);\n\n # if its not a flush, then must be ordinary straight\n if highestStraight != -1 :\n return Hand(STRAIGHT, [highestStraight], hand, cards);\n\n # check quads, full house, 3 of a kind, two pair, pair\n kickers = [];\n strength = HIGH_CARD;\n rank = [-1];\n for i in range(len(rankCount)) :\n if rankCount[i] == 4 :\n strength = FOUR_OF_A_KIND\n rank = [i]\n elif rankCount[i] == 3 :\n if strength == PAIR :\n strength = FULL_HOUSE\n rank = [i, rank[0]]\n else :\n strength = THREE_OF_A_KIND\n rank = [i]\n elif rankCount[i] == 2 :\n if strength == THREE_OF_A_KIND :\n strength = FULL_HOUSE;\n rank = [rank[0], i];\n elif strength == PAIR :\n strength = TWO_PAIR\n rank = [i, rank[0]]\n else :\n strength = PAIR\n rank = [i]\n elif rankCount[i] == 1 :\n kickers.append(i)\n\n return Hand(strength, rank, hand, cards, kickers[::-1])", "def b4Wan():\n \n tors, edges = tp.mesh_topo()\n G = build_graph(edges)\n \n # Get the routing path of all nodes\n table_file_name = '../outputs/mesh_routing_table.txt'\n table = all_routing(G, tors, table_file_name)\n if((os.path.isfile(table_file_name)) == False):\n table = all_routing(G, tors, table_file_name)\n else:\n json_data = open(table_file_name).read()\n table = json.loads(json_data)\n \n seeds, polys = cf.get_seeds_table(tors) #\n\n return G, tors, edges, table, seeds, polys", "def symmetric_cubature_fifth_order(dim=1):\n if dim == 1:\n weights = np.array([0.6667, 0.1667, 0.1667])\n sigma_pts = np.array([0., 1.7321, -1.7321])\n elif dim == 2:\n weights = np.array([0.4444, 0.1111, 0.1111, 0.1111, 0.1111, 0.0278, 0.0278, 0.0278, 0.0278])\n sigma_pts = np.block([[0., 1.7321, -1.7321, 0., 0., 1.7321, -1.7321, 1.7321, -1.7321],\n [0., 0., 0., 1.7321, -1.7321, 1.7321, -1.7321, -1.7321, 1.7321]])\n elif dim == 3:\n weights = np.array([0.3333, 0.0556, 0.0556, 0.0556, 0.0556, 0.0556, 0.0556, 0.0278, 0.0278, 0.0278,\n 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278])\n sigma_pts = np.block([[0., 1.7321, -1.7321, 0., 0., 0., 0., 1.7321, -1.7321, 1.7321, -1.7321, 1.7321,\n -1.7321, 1.7321, -1.7321, 0., 0., 0., 0.],\n [0., 0., 0., 1.7321, -1.7321, 0., 0., 1.7321, -1.7321, 
-1.7321, 1.7321, 0., 0., 0.,\n 0., 1.7321, -1.7321, 1.7321, -1.7321],\n [0., 0., 0., 0., 0., 1.7321, -1.7321, 0., 0., 0., 0., 1.7321, -1.7321, -1.7321,\n 1.7321, 1.7321, -1.7321, -1.7321, 1.7321]])\n elif dim == 6:\n weights = np.array([0.6667, -0.1111, -0.1111, -0.1111, -0.1111, -0.1111, -0.1111, -0.1111, -0.1111, -0.1111,\n -0.1111, -0.1111, -0.1111, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278,\n 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278,\n 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278,\n 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278,\n 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278,\n 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278])\n sigma_pts = np.block([[\n 0., 1.7321, -1.7321, 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 1.7321, -1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 1.7321,\n -1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0.],\n [0., 0., 0., 1.7321, -1.7321, 0., 0., 0., 0., 0., 0., 0.,\n 0., 1.7321, -1.7321, -1.7321, 1.7321, 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.7321, -1.7321, 1.7321,\n -1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 1.7321,\n -1.7321, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0.],\n [0., 0., 0., 0., 0., 1.7321, -1.7321, 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 1.7321, -1.7321, -1.7321, 1.7321, 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.7321, -1.7321, 1.7321,\n 1.7321, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 1.7321, -1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 1.7321,\n -1.7321, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0.],\n [0., 0., 0., 0., 0., 0., 0., 1.7321, -1.7321, 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.7321, -1.7321, 1.7321,\n 1.7321, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 1.7321, -1.7321, -1.7321, 1.7321, 0., 0., 0., 0., 0., 0., 0.,\n 0., 1.7321, -1.7321, -1.7321, 1.7321, 0., 0., 0., 0., 0., 0., 0.,\n 0., 1.7321, -1.7321, -1.7321, 1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 0., 0., 0.,\n 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.7321, -1.7321, 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 1.7321, -1.7321, -1.7321, 1.7321, 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 1.7321, -1.7321, -1.7321, 1.7321, 0., 0., 0.,\n 0., 0., 0., 0., 0., 1.7321, -1.7321, -1.7321, 1.7321, 0., 0., 0.,\n 0., 1.7321, -1.7321, -1.7321, 1.7321, 0., 0., 0., 0., 1.7321, -1.7321, 1.7321,\n -1.7321],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.7321,\n -1.7321, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 1.7321, -1.7321, -1.7321, 1.7321, 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.7321, -1.7321, -1.7321,\n 1.7321, 0., 0., 0., 0., 0., 0., 0., 0., 1.7321, -1.7321, -1.7321,\n 1.7321, 0., 0., 0., 0., 1.7321, -1.7321, -1.7321, 1.7321, 1.7321, -1.7321, -1.7321,\n 1.7321]\n ])\n\n # else:\n # # The weights and sigma-points from McNamee & Stenger\n # I0 = 1.\n # I2 = 1.\n # I4 = 3.\n # I22 = 1.\n # u = np.array(np.sqrt(I4 / I2))\n # A0 = I0 - dim * (I2 / I4) ** 2 * (I4 - 0.5 * (dim - 1) * I22)\n # A1 = 0.5 * (I2 
/ I4) ** 2 * (I4 - (dim - 1) * I22)\n # A11 = 0.25 * (I2 / I4) ** 2 * I22\n # U0 = sym_set(dim)\n # U1 = sym_set(dim, u)\n # U2 = sym_set(dim, np.block([u, u]))\n # sigma_pts = np.block([U0, U1, U2])\n # weights = np.block([A0 * np.ones([1, U0.shape[1]]),\n # A1 * np.ones([1, U1.shape[1]]),\n # A11 * np.ones([1, U2.shape[1]])])\n return sigma_pts, weights", "def ramp6(params, phase, args=dict(n=4, guess=[1, 0.053, 0.0040 , 0.4])):\n # 2013-12-07 14:08 IJMC: Created.\n\n if params[3]>=phase.min():\n params[3] = phase.min() - np.diff(phase).mean()/1e6\n \n return params[0] * (1. + params[1] * (phase - 0.5) + params[2] * np.log(phase - params[3]))", "def create_phase_separator():\n cost_operators = []\n reduced_distance_matrix = np.delete(distance_matrix, starting_node, axis=0)\n reduced_distance_matrix = np.delete(reduced_distance_matrix, starting_node, axis=1)\n reduced_number_of_nodes = len(reduced_distance_matrix)\n number_of_qubits = reduced_number_of_nodes ** 2\n\n for t in range(reduced_number_of_nodes - 1):\n for city_1 in range(reduced_number_of_nodes):\n for city_2 in range(reduced_number_of_nodes):\n if city_1 != city_2:\n distance = reduced_distance_matrix[city_1, city_2] \n qubit_1 = t * (reduced_number_of_nodes) + city_1\n qubit_2 = (t + 1) * (reduced_number_of_nodes) + city_2\n cost_operators.append(PauliTerm(\"Z\", qubit_1, distance) * PauliTerm(\"Z\", qubit_2))\n\n costs_to_starting_node = np.delete(distance_matrix[:, starting_node], starting_node)\n for city in range(reduced_number_of_nodes):\n distance_from_0 = -costs_to_starting_node[city]\n qubit = city\n cost_operators.append(PauliTerm(\"Z\", qubit, distance_from_0))\n\n for city in range(reduced_number_of_nodes):\n distance_from_0 = -costs_to_starting_node[city]\n qubit = number_of_qubits - (reduced_number_of_nodes) + city\n cost_operators.append(PauliTerm(\"Z\", qubit, distance_from_0))\n\n phase_separator = [PauliSum(cost_operators)]\n return phase_separator", "def PH3From5Moments (moms, prec=1e-10):\n \n m1, m2, m3, m4, m5 = moms\n \n #convert the moments to reduced moments\n moms = ReducedMomsFromMoms([m1,m2,m3,m4,m5])\n for i in range(5):\n moms[i] /= m1**(i+1)\n #solve linear system of equations for a0 a1 a2\n M = np.matrix ([[moms[2], -moms[1], moms[0]],[moms[3], -moms[2], moms[1]],[moms[4], -moms[3], moms[2]]])\n a = np.linalg.solve(M, [1, moms[0], moms[1]])\n\n discr = a[2]*a[2]-3.0*a[1]\n if discr < 0:\n raise Exception(\"Invalid characteristic polynomial!\")\n\n gu = (a[2] + 2.0*math.sqrt(discr)) / 3.0\n g0 = (a[2] + math.sqrt(discr)) / 3.0\n \n rts = np.roots(np.hstack((1,a[::-1])))\n ix = np.argsort(np.real(rts)) \n lamb = (-rts)[ix]\n \n d1 = a[1] - a[2] - a[0] * moms[1];\n d2 = a[0] - a[1] - a[2] * d1;\n d3 = -a[0] - a[1]*d1 - a[2]*d2;\n\n if d1>1e-10 or (abs(d1)<prec and d2>0):\n raise Exception(\"Negative density around 0!\")\n\n if lamb[2]<0:\n raise Exception(\"Invalid eigenvalues!\")\n\n if lamb[0].imag < prec:\n gl = lamb[0].real\n else:\n gl = g0\n\n if gl > gu+prec:\n raise Exception(\"Invalid eigenvalues (gl>gu detected)!\")\n if gl > gu:\n gl = gu\n\n if abs(d1) < prec:\n g2 = 0\n else:\n g2 = -d2 / d1\n\n if g2>gu+prec:\n raise Exception(\"alpha_2 is negative!\")\n if g2 > gu:\n g2 = gu;\n\n x1 = max(g2, gl)\n\n if np.isreal(lamb[0]) and g2<gl:\n x13 = 0\n else:\n x13 = x1 - a[0] / (x1*x1 - a[2]*x1 + a[1])\n\n bels = (a[2]-x1)**2 - 4.0*(x1*x1-a[2]*x1+a[1])\n if bels<0 and bels>-prec:\n bels = 0\n\n x2 = (a[2] - x1 + math.sqrt(bels)) / 2.0\n x3 = (a[2] - x1 - math.sqrt(bels)) / 2.0\n p1 = d1 / 
(x13 - x1)\n p2 = (x1*d1 + d2) / (x13-x1) / x2\n p3 = (x1*x2*d1 + x2*d2 + x1*d2 + d3) / (x13-x1) / x2 / x3\n\n T = np.matrix([[-x1, 0, x13],[x2, -x2, 0],[0, x3, -x3]]) / m1\n alpha = np.matrix([p1,p2,p3])\n\n if x13<-prec or x13>x1:\n raise Exception(\"Invalid genrator!\")\n\n if np.min(np.real(alpha))<-prec:\n raise Exception(\"Initial vector has negative entries!\")\n \n if np.max(abs(np.imag(alpha)))>prec:\n raise Exception(\"Inital vector has complex entries!\")\n\n if np.max(np.real(alpha))>1+prec:\n raise Exception(\"Initial vector has entries that are greater than 1!\")\n\n return (alpha, T)", "def part_5b():\n\n raise NotImplementedError", "def Five2Four(data, shape4d, dst_type, format_, target=utils.CCE):\n utils.ops_dtype_check([data.dtype, dst_type], utils.DtypeForDavinci.ALL_FLOAT)\n shape5d = get_shape(data)\n if not shape_is_dynamic(data):\n if len(shape5d) != 5 or shape5d[-1] != 16:\n raise ValueError(\"five2four_cce only support 5-dim data and last dim should be 16\")\n\n bs, c1, h, w, c0 = shape5d\n if not shape_is_dynamic(data):\n utils.davinci_format_check(shape5d, \"NC1HWC0\", dim=5)\n # Check format\n if format_ not in ['NCHW', 'NHWC']:\n raise ValueError(\"{} format is not support, five2four only support NCHW and NHWC format input\"\n .format(format_))\n if format_ == \"NCHW\":\n if shape_is_dynamic(data):\n shape4d = [bs, c1 * c0, h, w]\n _, c, h_4d, w_4d = shape4d\n else:\n if shape_is_dynamic(data):\n shape4d = [bs, h, w, c1 * c0]\n _, h_4d, w_4d, c = shape4d\n utils.davinci_format_check(shape4d, format_, dim=4)\n\n # Check is shape4d and shape5d match\n if False not in [isinstance(s, (int, akg.tvm.expr.IntImm)) for s in shape5d]:\n if h_4d != h or w_4d != w:\n raise ValueError(\"five2four_cce's shape4d h and w should equal to data shape's h and w\")\n if c > c1 * c0 or c <= (c1 - 1) * c0:\n raise ValueError(\"five2four_cce's shape4d c should in set ((c1 - 1) * c0, c1 * c0]\")\n\n # Check size c when casting happens\n if not shape_is_dynamic(data):\n if data.dtype != dst_type and c >= C_LIMIT_FOR_CAST:\n raise ValueError(\"When input and output data type is not matched, shape of 'c' axis should not exceed {}, \"\n \"while currently set is {}\".format(C_LIMIT_FOR_CAST, c))\n\n @script(capture=locals())\n def nc1hwc0_to_nhwc(inputs, bs, h, w, c, c1, c0):\n output = allocate((bs, h, w, c), inputs.dtype, \"local\")\n for n_i in range(bs):\n for h_i in range(h):\n for w_i in range(w):\n for c_i in range(c1):\n for c_i0 in range(c0):\n output[n_i, h_i, w_i, c_i * c0 + c_i0] = inputs[n_i, c_i, h_i, w_i, c_i0]\n return output\n\n @script(capture=locals())\n def nc1hwc0_to_nchw(inputs, bs, h, w, c, c1, c0):\n output = allocate((bs, c, h, w), inputs.dtype, \"local\")\n for n_i in range(bs):\n for c_i in range(c1):\n for h_i in range(h):\n for w_i in range(w):\n for c_i0 in range(c0):\n output[n_i, c_i * c0 + c_i0, h_i, w_i] = inputs[n_i, c_i, h_i, w_i, c_i0]\n return output\n\n # if c % 16 == 0, h and w == 1, five2four is a reshape operation\n if shape_is_dynamic(data):\n call_reshape = isinstance(h, int) and isinstance(w, int) and h == 1 and w == 1\n else:\n call_reshape = h == 1 and w == 1 and c % 16 == 0\n c_value = None\n expansion = None\n if format_ == \"NHWC\":\n if call_reshape:\n output = akg.topi.reshape(data, (bs, h, w, c))\n if shape_is_dynamic(data):\n output = akg.tvm.compute((bs, h, w, c), lambda *indice: output(*indice), name=\"reshape\")\n elif c < c0:\n reshape_output = akg.topi.reshape(data, (bs, h, w, c0))\n output = akg.tvm.compute((bs, h, w, c), lambda 
*i: reshape_output(*i), name='slice_output')\n else:\n output = nc1hwc0_to_nhwc(\n data,\n to_tvm_const(bs),\n to_tvm_const(h),\n to_tvm_const(w),\n to_tvm_const(c),\n to_tvm_const(c1),\n to_tvm_const(c0))\n\n else:\n if call_reshape:\n output = akg.topi.reshape(data, (bs, c, h, w))\n if shape_is_dynamic(data):\n output = akg.tvm.compute((bs, c, h, w), lambda *indice: output(*indice), name=\"reshape\")\n else:\n output = nc1hwc0_to_nchw(\n data,\n to_tvm_const(bs),\n to_tvm_const(h),\n to_tvm_const(w),\n to_tvm_const(c),\n to_tvm_const(c1),\n to_tvm_const(c0))\n\n # two special cases for tiling strategy\n if not shape_is_dynamic(data):\n if c < c0 or output.dtype != dst_type:\n c_value = c\n if c % c0 != 0 and output.dtype != dst_type:\n expansion = int(ct_util.BLOCK_SIZE / get_bytes(data.dtype))\n attrs = get_attrs()\n if not call_reshape:\n attrs[\"custom_tiling\"] = five2four_tiling_strategy(data, c_value, expansion)\n\n if output.dtype != dst_type:\n output = akg.topi.cast(output, dst_type)\n return output, attrs", "def test_3_1_4D_cube_init(self):\n check = [(0, 0, 0, 0), (1, 1, 1, 1), (1, 0, 0, 0), (1, 1, 0, 0),\n (1, 1, 1, 0), (1, 1, 0, 1), (1, 0, 1, 0), (1, 0, 1, 1),\n (1, 0, 0, 1), (0, 1, 0, 0), (0, 1, 1, 0), (0, 1, 1, 1),\n (0, 1, 0, 1), (0, 0, 1, 0), (0, 0, 1, 1), (0, 0, 0, 1),\n (0.5, 0.5, 0.5, 0.5)]\n nn_checks = {(0, 1, 1, 0): [(1, 1, 1, 0), (0, 1, 1, 1), (1, 1, 1, 1),\n (0, 1, 0, 0), (0, 0, 1, 0), (0, 0, 0, 0),\n (0.5, 0.5, 0.5, 0.5)],\n (0.5, 0.5, 0.5, 0.5): [(1, 1, 0, 1), (1, 0, 1, 1),\n (1, 1, 1, 0), (1, 0, 0, 0),\n (1, 1, 0, 0), (1, 0, 1, 0),\n (0, 1, 1, 1), (0, 0, 0, 1),\n (1, 1, 1, 1), (1, 0, 0, 1),\n (0, 1, 0, 0), (0, 0, 1, 0),\n (0, 0, 0, 0), (0, 1, 1, 0),\n (0, 1, 0, 1), (0, 0, 1, 1)],\n (1, 0, 0, 0): [(1, 1, 0, 1), (1, 0, 1, 1), (1, 1, 1, 0),\n (1, 1, 0, 0), (1, 0, 1, 0), (1, 1, 1, 1),\n (1, 0, 0, 1), (0, 0, 0, 0),\n (0.5, 0.5, 0.5, 0.5)]}\n\n init_triangulation(4, 0, check, nn_checks)", "def _delayandsum5(data, offsets, ifactor2, steeramp, out, autopower):\n num, gridsize, numchannels = offsets.shape\n num = out.shape[0]\n for n in nb.prange(num):\n for gi in nb.prange(gridsize):\n out[n,gi] = 0\n autopower[n,gi] = 0\n for mi in range(numchannels):\n ind = offsets[n,gi,mi] + n\n r = (data[ind,mi] * (1-ifactor2[n,gi,mi]) \\\n + data[ind+1,mi] * ifactor2[n,gi,mi]) * steeramp[n,gi,mi]\n out[n,gi] += r\n autopower[n,gi] += r*r", "def build_non_planar_test_graph_with_k5_subgraph():\n graph = build_triangle_graph()\n addition_graph = build_k5_graph()\n\n merge_graphs(graph, addition_graph)\n\n return graph", "def build_non_planar_disconnected_test_graph_with_k5_subgraph():\n graph = build_triangle_graph()\n addition_graph = build_k5_graph()\n addition_graph2 = build_k5_graph()\n\n merge_graphs(graph, addition_graph)\n merge_graphs(graph, addition_graph2)\n\n return graph", "def test_exact_supercontrolled_decompose_phase_3_use_random(self, seed):\n state = np.random.default_rng(seed)\n decomposer = self.make_random_supercontrolled_decomposer(state)\n\n tgt_k1 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n tgt_k2 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n tgt_phase = state.random() * 2 * np.pi\n\n tgt_a, tgt_b = state.random(size=2) * np.pi / 4\n tgt_c = state.random() * np.pi / 2 - np.pi / 4\n tgt_unitary = np.exp(1j * tgt_phase) * tgt_k1 @ Ud(tgt_a, tgt_b, tgt_c) @ tgt_k2\n self.check_exact_decomposition(tgt_unitary, decomposer, num_basis_uses=3)", "def _sum_4_3j_to_6j(expr: Sum, resolvers, 
sums_dict):\n\n if len(expr.args) != 6:\n return None\n\n summand, sums = _parse_sum(expr)\n\n # From here on, we gradually rewrite the expression into the pattern of the\n # rule.\n try:\n phase, wigner_3js = _parse_3js(summand, sums=sums)\n if len(wigner_3js) != 4:\n return None\n\n ext_3js = []\n int_3js = []\n for i in wigner_3js:\n if len(i.m_dumms) == 3:\n int_3js.append(i)\n elif len(i.m_dumms) == 2:\n ext_3js.append(i)\n else:\n return None\n continue\n if len(ext_3js) != 2 or len(int_3js) != 2:\n return None\n\n # Put the external ones in the middle.\n for ext_3j in ext_3js:\n phase *= ext_3j.swap(\n lambda x: x.m_symb not in sums, 1\n )\n continue\n\n decided_ms = {}\n\n # For performance.\n empty = []\n\n # Get the edge between the two internal 3js.\n phase *= _check_m_contr(\n int_3js[0], int_3js[1], decided_ms, empty, [(2, 2)]\n )\n\n # Match the corresponding slots in the pattern.\n phase *= _check_m_contr(\n ext_3js[0], int_3js[0], decided_ms, empty, [(2, 0)]\n )\n phase *= _check_m_contr(\n int_3js[0], ext_3js[1], decided_ms, empty, [(1, 0)]\n )\n phase *= _check_m_contr(\n ext_3js[1], int_3js[1], decided_ms, empty, [(2, 0)]\n )\n phase *= _check_m_contr(\n ext_3js[0], int_3js[1], decided_ms, empty, [(0, 1)]\n )\n except _UnexpectedForm:\n return None\n\n ext_0 = ext_3js[0].indices\n ext_1 = ext_3js[1].indices\n int_0 = int_3js[0].indices\n int_1 = int_3js[1].indices\n j2, m2 = ext_0[0].j, ext_0[0].m\n j3, m3 = ext_0[1].j, -ext_0[1].m\n j1, m1 = ext_0[2].j, ext_0[2].m\n assert m1 == -int_0[0].m\n j5, m5 = int_0[1].j, int_0[1].m\n j6, m6 = int_0[2].j, int_0[2].m\n assert m5 == -ext_1[0].m\n jprm3, mprm3 = ext_1[1].j, ext_1[1].m\n j4, m4 = ext_1[2].j, ext_1[2].m\n assert m4 == -int_1[0].m\n assert m2 == -int_1[1].m\n assert m6 == -int_1[2].m\n\n noinv_phase, phase = _decomp_phase(phase.xreplace(decided_ms), sums)\n jms, rels = _get_jms_rels(wigner_3js)\n simpl_phase = _simpl_pono(phase, resolvers, sums_dict, jms, rels)\n expected_phase = _simpl_pono(_NEG_UNITY ** (\n - m1 - m2 - m4 - m5 - m6\n ), resolvers, sums_dict, jms, rels)\n if (simpl_phase / expected_phase).simplify() != 1:\n return None\n\n return _NEG_UNITY ** (j3 - m3 - j1 - j2 - j4 - j5 - j6) / (2 * j3 + 1) * (\n KroneckerDelta(j3, jprm3)\n * KroneckerDelta(m3, mprm3)\n * Wigner6j(j1, j2, j3, j4, j5, j6)\n ) * noinv_phase", "def phase(self):\n pass", "def chain_corrections():\n \n #read the files\n sample_4m=read_sample(map_files('sample_4m'))\n empty_cell_4m=read_sample(map_files('empty_cell_4m'))\n empty_4m=read_sample(map_files('empty_4m'))\n transmission_sample_cell_4m=read_sample(map_files('trans_sample_4m'))\n transmission_empty_cell_4m=read_sample(map_files('trans_empty_cell_4m'))\n blocked_beam_4m=read_sample(map_files('blocked_4m'))\n sensitivity=read_div(map_files('div'))\n #mask=read_sample(map_files('mask'))\n \n #normalize the monitors\n \n sample_4m_norm=monitor_normalize(sample_4m)\n empty_cell_4m_norm=monitor_normalize(empty_cell_4m)\n transmission_sample_cell_4m_norm=monitor_normalize(transmission_sample_cell_4m)\n transmission_empty_cell_4m_norm=monitor_normalize(transmission_empty_cell_4m)\n empty_4m_norm=monitor_normalize(empty_4m)\n blocked_beam_4m_norm=monitor_normalize(blocked_beam_4m)\n \n #calculate q\n sample_4m_norm_q=convert_q(sample_4m_norm)\n empty_cell_4m_norm_q=convert_q(empty_cell_4m)\n blocked_beam_4m_norm_q=convert_q(blocked_beam_4m_norm)\n transmission_sample_cell_4m_norm_q=convert_q(transmission_sample_cell_4m_norm)\n 
transmission_empty_cell_4m_norm_q=convert_q(transmission_empty_cell_4m_norm)\n empty_4m_norm_q=convert_q(empty_4m_norm)\n \n \n print 'converted'\n #convert flatness\n sample_4m_solid=correct_solid_angle(sample_4m_norm_q)\n empty_cell_4m_solid=correct_solid_angle(empty_cell_4m_norm_q)\n blocked_beam_4m_solid=correct_solid_angle(blocked_beam_4m_norm_q)\n transmission_sample_cell_4m_solid=correct_solid_angle(transmission_sample_cell_4m_norm_q)\n transmission_empty_cell_4m_solid=correct_solid_angle(transmission_empty_cell_4m_norm_q)\n empty_4m_solid=correct_solid_angle(empty_4m_norm_q)\n \n \n #calculate transmission\n coord_left=(60,60)\n coord_right=(70,70)\n transmission_sample_cell_4m_rat=generate_transmission(transmission_sample_cell_4m_solid,empty_4m_solid,\n coord_left,coord_right)\n transmission_empty_cell_4m_rat=generate_transmission(transmission_empty_cell_4m_solid,empty_4m_solid,\n coord_left,coord_right)\n print 'Sample transmission= {} (IGOR Value = 0.724)'.format(transmission_sample_cell_4m_rat)\n print 'Empty Cell transmission= {} (IGOR Value = 0.929)'.format(transmission_empty_cell_4m_rat)\n print 'hi'\n \n #Initial Correction -- Not with the sub/mult tools,\n #SAM = sample_4m_solid.data\n #print SAM.x\n #EMP = empty_4m_solid.data\n #print \"EMP: \"\n #print EMP.x\n #BGD = blocked_beam_4m_solid.data\n #print \"BGD\"\n #print BGD.x\n #Tsam = transmission_sample_cell_4m_rat\n #Temp = transmission_empty_cell_4m_rat\n #COR1 = SAM.__sub__(BGD)\n #COR2 = (EMP.__sub__(BGD)).__mul__(Tsam/Temp)\n #COR = COR1.__sub__(COR2)\n #print \"after initial correction: \"\n #print COR.x\n \n SAM = sample_4m_solid\n print SAM.data.x\n EMP = empty_4m_solid\n print \"EMP: \"\n print EMP.data.x\n BGD = blocked_beam_4m_solid\n print \"BGD:\"\n print BGD.data.x\n Tsam = transmission_sample_cell_4m_rat\n Temp = transmission_empty_cell_4m_rat\n print \"COR1:\"\n COR1 = SAM.__sub1__(BGD)\n print COR1.data.x #check=works\n #-----Problems Here-------\n print \"COR2:\"\n COR2 = (EMP.__sub1__(BGD)) #check=works\n print COR2.data.x\n print \"COR3:\"\n #AJJ - __mul__ not working because Tsam and Temp are Measurement instances and not simply floats. 
See above.\n COR3 = COR2.__mul__(Tsam/Temp) #mul not working\n print COR3.data.x\n #COR = COR1.__sub1__(COR2)\n #print \"after initial correction: \"\n #print COR.x\n #COR2 = (EMP.__sub__(BGD)).__mul__(Tsam/Temp)\n #COR = COR1.__sub__(COR2)\n #print \"after initial correction: \"\n #print COR.data.x", "def part_5a():\n\n raise NotImplementedError", "def pro_avfid_superoperator_phasecorrected(U,phases):\n Ucorrection = qtp.Qobj([[np.exp(-1j*np.deg2rad(phases[0])), 0, 0, 0, 0, 0, 0, 0, 0],\n [0, np.exp(-1j*np.deg2rad(phases[1])), 0, 0, 0, 0, 0, 0, 0],\n [0, 0, np.exp(-1j*np.deg2rad(phases[4]-phases[-1])), 0, 0, 0, 0, 0, 0],\n [0, 0, 0, np.exp(-1j*np.deg2rad(phases[2])), 0, 0, 0, 0, 0],\n [0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[3]-phases[-1])), 0, 0, 0, 0],\n [0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[4]-phases[-1]+phases[2]-phases[0])), 0, 0, 0],\n [0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[5])), 0, 0],\n [0, 0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[5]+phases[1]-phases[0])), 0],\n [0, 0, 0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[4]-phases[-1]+phases[5]-phases[0]))]],\n type='oper',\n dims=[[3, 3], [3, 3]])\n\n if U.type=='oper':\n U=Ucorrection*U\n ptrace = np.abs((U.dag()*U_target).tr())**2\n dim = 9 # dimension of the whole space\n return np.real((ptrace+dim)/(dim*(dim+1)))\n\n elif U.type=='super':\n U=qtp.to_super(Ucorrection)*U\n return np.real(qtp.average_gate_fidelity(U,target=U_target_diffdims))", "def getPhase(phase):", "def to_slots5(data):\n\tres = [0] * ((len(data) + 4) // 5)\n\tfor i, d in enumerate(data):\n\t\tres[i // 5] += d\n\treturn res", "def activatePhase4 (self):\n try:\n # entering phase 4\n self._log(\"enter-phase4\").debug2(\"entering phase4 for '%s'. activateData = %s\",self._logicalDiskName,self._activateData)\n \n shouldUpdateCurrentUuid = self._activateData.shouldUpdateCurrentUuid\n blockDevice = self._activateData.blockDevice\n timer = self._activateData.timer\n mountingPoint = self._activateData.mountingPoint\n blockDeviceReadahead = self._activateData.blockDeviceReadahead\n currentUuid = self._activateData.currentUuid\n\n # if needed, update current uuid after new file system was created\n if shouldUpdateCurrentUuid:\n currentUuid,rc = self._getBlockDeviceUuid(blockDevice,timer)\n if ((rc != ReturnCodes.kOk) or (currentUuid == None)):\n self._log(\"failed-blkid2\").error(\"activatePhase4() failed to obtain uuids! activateData = %s\",self._activateData)\n self.operationalStatus = blinky_generated_enums.FileSystemOperationalStatusType.kDown\n self.operationalStatusReason = blinky_generated_enums.FileSystemOperationalStatusReasonType.kUuidError\n return rc\n\n self._log(\"fs-created-on-block-device\").debug2(\"new file system (UUID='%s') was created on block device '%s'\",currentUuid,blockDevice)\n self._activateData.currentUuid = currentUuid\n self._activateData.shouldUpdateCurrentUuid = False\n \n # before mounting\n rc = self._setReservedBlockPercentage(blockDevice,0,timer)\n if (rc != ReturnCodes.kOk):\n self._log(\"activate-fail-on-setting-reserved-block-percentage\").error(\"activatePhase4() failed on setting reserved blocks parameter! 
activateData = %s\",self._activateData)\n self.operationalStatus = blinky_generated_enums.FileSystemOperationalStatusType.kDown\n self.operationalStatusReason = blinky_generated_enums.FileSystemOperationalStatusReasonType.kUnknown\n return rc\n\n # mount\n rc = self._mount(blockDevice,mountingPoint,blockDeviceReadahead,timer)\n if (rc != ReturnCodes.kOk):\n self._log(\"activate-fail-on-mount\").error(\"activatePhase4() failed on mount! activateData = %s\",self._activateData)\n self.operationalStatus = blinky_generated_enums.FileSystemOperationalStatusType.kDown\n self.operationalStatusReason = blinky_generated_enums.FileSystemOperationalStatusReasonType.kMountError\n return rc\n\n # full success - since this is the final stage\n self._log(\"activate-success\").debug2(\"activation succeeded! activateData = %s\",self._activateData)\n self.operationalStatus = blinky_generated_enums.FileSystemOperationalStatusType.kUp\n self.operationalStatusReason = blinky_generated_enums.FileSystemOperationalStatusReasonType.kOk\n\n return ReturnCodes.kOk\n\n except Exception,e:\n self._log(\"activate-fs-phase4-exception\").error(\"activatePhase4() failed! activateData = %s, exception = '%s'\",self._activateData,e)\n self.operationalStatus = blinky_generated_enums.FileSystemOperationalStatusType.kDown\n self.operationalStatusReason = blinky_generated_enums.FileSystemOperationalStatusReasonType.kUnknown\n return ReturnCodes.kGeneralError", "def prob5():\n x, y, l = sy.symbols('x, y, l')\n A = sy.Matrix([[x-y, x, 0],\n [x, x-y, x],\n [0, x, x-y]])\n char_poly = sy.det(A - l*sy.eye(3))\n eig_vals = sy.solve(char_poly, l)\n eig_dict = {}\n for v in eig_vals:\n eig_dict[v] = (A - v*sy.eye(3)).nullspace()\n return eig_dict", "def trivial_phase(indivs):\r\n\tpool=make_pool(len(indivs[0]))\r\n\r\n\tfor i in xrange(1,len(pool)+1):\r\n\t\tall_combi=itertools.combinations(pool,i)\r\n\t\tfor t in all_combi:\r\n\t\t\tt+=t\r\n\t\t\tcandidate_couples=list(itertools.combinations(t,2))\r\n\t\t\tgeno_list=map(lambda x: mix(x[0],x[1]), candidate_couples)\r\n\t \t\tif check(indivs, geno_list):\r\n\t \t\t\treturn list(set(t)), candidate_couples\r\n\tprint \"It's impossible to execute this, something must be wrong.\"", "def part1_2(puzzle_input):\n [initial_state_string, configurations] = puzzle_input.split('\\n\\n')\n initial_state = re.sub('initial state: ', '', initial_state_string)\n rules_arr = configurations.split('\\n')\n rules = [re.split(' => ', line) for line in rules_arr]\n rules = {t[0]: t[1] for t in rules}\n current_state = '..........' + initial_state + '...............................................................................................................................................'\n for i in range(100): # After 100th cycle, the only change is that there is a '#' that shifts right\n next_generation_string = \"\"\n for index, pot in enumerate(current_state):\n if index == 0:\n temp_string = '..' + current_state[:3]\n elif index == 1:\n temp_string = '.' 
+ current_state[:4]\n elif index == len(current_state) - 2:\n temp_string = current_state[-4:] + '.'\n elif index == len(current_state) - 1:\n temp_string = current_state[-3:] + '..'\n else:\n temp_string = current_state[index-2:index+3]\n if temp_string in rules:\n next_generation_string += rules[temp_string]\n else:\n next_generation_string += pot\n current_state = next_generation_string\n\n # For part 1\n part1_sum = 0\n if i == 19:\n for index, pot in enumerate(current_state):\n if pot == '#':\n part1_sum += index - 10\n print(part1_sum)\n\n # Part 2\n part2_sum = 0\n for index, pot in enumerate(current_state):\n if pot == '#':\n part2_sum += index - 10 + 50000000000 - 100\n print(part2_sum)", "def get_phase_parameters(phase):\r\n if phase==1:\r\n return {\r\n 'Q1': 0.5,\r\n 'Q3': 0.3,\r\n 'Q5': 0.15,\r\n 'win': 0.05,\r\n }\r\n elif phase==2:\r\n return {\r\n 'Q1': 0.35,\r\n 'Q3': 0.25,\r\n 'Q5': 0.25,\r\n 'win': 0.15,\r\n }\r\n elif phase==3:\r\n return {\r\n 'Q1': 0.25,\r\n 'Q3': 0.25,\r\n 'Q5': 0.25,\r\n 'win': 0.25,\r\n }\r\n elif phase==4:\r\n return {\r\n 'Q1': 0.15,\r\n 'Q3': 0.2,\r\n 'Q5': 0.35,\r\n 'win': 0.3,\r\n }\r\n elif phase==5:\r\n return {\r\n 'Q1': 0.05,\r\n 'Q3': 0.1,\r\n 'Q5': 0.35,\r\n 'win': 0.50,\r\n }\r\n elif phase==6:\r\n return {\r\n 'Q1': 0.03,\r\n 'Q3': 0.1,\r\n 'Q5': 0.22,\r\n 'win': 0.75\r\n }", "def crack_md5(cand_len, b_values):\n global s, K # `s` and `K` are global\n\n slv = z3.Solver()\n \n inp = [z3.BitVec(f'inp_{i}', 32) for i in range(16)]\n\n add_inp_constraint(cand_len, inp, slv)\n\n # MD5 implementation using symbolic variables.\n a0 = 0x67452301 # A\n b0 = 0xefcdab89 # B\n c0 = 0x98badcfe # C\n d0 = 0x10325476 # D\n\n A, B, C, D = a0, b0, c0, d0\n \n for i in range(64):\n if 0 <= i and i <= 15:\n F = (B & C) | (~B & D)\n g = i\n elif 16 <= i and i <= 31:\n F = (D & B) | (~D & C)\n g = (5*i + 1) % 16\n elif 32 <= i and i <= 47:\n F = B ^ C ^ D\n g = (3*i + 5) % 16\n elif 48 <= i <= 63:\n F = C ^ (B | ~D)\n g = (7*i) % 16\n\n F &= 0xFFFFFFFF\n F = (F + A + K[i] + inp[g]) & 0xFFFFFFFF \n A = D\n D = C\n C = B\n\n # NOTE: rol DOES NOT WORK! WE HAVE TO USE z3's `RotateLeft`.\n B = (B + z3.RotateLeft(F, s[i])) & 0xFFFFFFFF\n\n slv.add(B & 0x3FF == b_values[i])\n\n \n # Check for solutions\n def to_ascii(x):\n return chr(x & 0xFF) + chr((x >> 8) & 0xFF) + chr((x >> 16) & 0xFF) + chr(x >> 24)\n\n while slv.check() == z3.sat:\n mdl = slv.model()\n\n print('[+] Solution FOUND!')\n \n flag = ''\n for i, j in enumerate(inp):\n yy = mdl.evaluate(j).as_long() \n print(f'[+] {i:2d} ~~> {yy:08X} ~~> {repr(to_ascii(yy))}')\n flag += to_ascii(yy)\n\n flag = flag[:cand_len]\n\n print('[+] FLAG IS: hxp{%s}' % flag)\n return 1\n else:\n print('[+] Cannot find satisfiable solution :\\\\')\n return -1", "def test_5_correctness(self):\n # TermTestState (see below) is designed so that the first solution found\n # has a higher cost than the second solution.\n # Start in stateindex 1 and look for path to index 0.\n plan = list(astar(TermTestState(),\n lambda state: (state.state == 0), # goal test\n TermTestState.TermTestH)) # function: distance to goal\n\n correct = [Action(\"1\", \"3\", cost=1.0),\n Action(\"3\", \"4\", cost=0.5),\n Action(\"4\", \"5\", cost=0.5),\n Action(\"5\", \"6\", cost=0.5),\n Action(\"6\", \"G\", cost=0.5)]\n \n cost = sum(p.cost for p in plan)\n c_cost = sum(c.cost for c in correct)\n # Check cost\n self.assertEqual(cost, c_cost,\n f\"Correct cost {c_cost}, your plan cost: {cost}. 
Check so you return the best solution, and not only the first one found if you have too high cost. Check so you return the full path if too low.\"\n )\n # Check path in general.\n self.assertTrue(len(plan) == len(correct) and all(p == c for p,c in zip(plan,correct)),\n f\"Correct plan: {correct}; Your plan: {plan}; Make sure that the plan isn't e.g. reversed.\")", "def eq510821ad2(db, fy, fcp):", "def circuit():\n np.random.seed(1967)\n for gates in gates_per_layers:\n for gate in gates:\n qml.apply(gate)\n return qml.expval(qml.PauliZ(0))", "def refugia_adj_5_full_2_iter3 (params, ns):\n #33 parameters \n nu1x, nuA, nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m0_12, m0_21, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T0, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T0 (initial split; the definition of this time epoch differentiates this model from refugia_adj_5_simsplit_4epochs)\n nu_T0 = [nu1x, nuA]\n mig0 = numpy.array([[0, m0_12],[m0_21, 0]])\n fs.integrate(nu_T0, T0, m=mig0)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def q4(array):\n a = array[0]\n b = array[5]\n c = array[10]\n d = array[15]\n a1 = roll(a, 00) + roll(b, 00) + roll(b, 12) + roll(c, 12) + roll(d, 28) + roll(a, 28) + roll(b, 28)\n b1 = roll(b, 19) + roll(c, 19) + roll(d, 3) + roll(a, 3) + roll(b, 3) + roll(c, 7) + roll(d, 23) + roll(a,\n 23) + roll(\n b, 23) + roll(a, 15) + roll(b, 15) + roll(b, 27) + roll(c, 27) + roll(d, 11) + roll(a, 11) + roll(b, 11) + roll(\n d, 31) + roll(a, 31) + roll(b, 31)\n c1 = roll(c, 0) + roll(d, 16) + roll(a, 16) + roll(b, 16) + roll(a, 8) + roll(b, 8) + roll(b, 20) + roll(c,\n 20) + roll(\n d, 4) + roll(a, 4) + roll(b, 4) + roll(d, 24) + roll(a, 24) + roll(b, 24)\n d1 = roll(a, 8) + roll(b, 8) + roll(b, 20) + roll(c, 20) + roll(d, 4) + roll(a, 4) + roll(b, 4) + roll(d,\n 24) + roll(a,\n 24) + roll(\n b, 24)\n\n return a1, array[1], array[2], array[3], array[4], b1, array[6], array[7], array[8], array[9], c1, array[11], array[\n 12], array[13], array[14], d1", "def calculate_vn4_over_vn2(vn_data_array, outputFileName):\n vn_data_array = array(vn_data_array)\n nev = len(vn_data_array[:, 0])\n dN = real(vn_data_array[:, 0])\n Q1 = dN*vn_data_array[:, 1]\n Q2 = dN*vn_data_array[:, 2]\n Q3 = dN*vn_data_array[:, 3]\n Q4 = dN*vn_data_array[:, 4]\n Q5 = dN*vn_data_array[:, 5]\n Q6 = dN*vn_data_array[:, 6]\n\n # two-particle correlation\n N2_weight = dN*(dN - 1.)\n Q1_2 = abs(Q1)**2. 
- dN\n Q2_2 = abs(Q2)**2. - dN\n Q3_2 = abs(Q3)**2. - dN\n\n # four-particle correlation\n N4_weight = dN*(dN - 1.)*(dN - 2.)*(dN - 3.)\n Q1_4 = ((abs(Q1)**4.) - 2.*real(Q2*conj(Q1)*conj(Q1))\n - 4.*(dN - 2.)*(abs(Q1)**2.) + abs(Q2)**2.\n + 2*dN*(dN - 3.))\n Q2_4 = ((abs(Q2)**4.) - 2.*real(Q4*conj(Q2)*conj(Q2))\n - 4.*(dN - 2.)*(abs(Q2)**2.) + abs(Q4)**2.\n + 2*dN*(dN - 3.))\n Q3_4 = ((abs(Q3)**4.) - 2.*real(Q6*conj(Q3)*conj(Q3))\n - 4.*(dN - 2.)*(abs(Q3)**2.) + abs(Q6)**2.\n + 2*dN*(dN - 3.))\n\n # calcualte observables with Jackknife resampling method\n r1_array = zeros(nev)\n r2_array = zeros(nev)\n r3_array = zeros(nev)\n F1_array = zeros(nev)\n F2_array = zeros(nev)\n F3_array = zeros(nev)\n for iev in range(nev):\n array_idx = [True]*nev\n array_idx[iev] = False\n array_idx = array(array_idx)\n\n # C_n{4}\n C_1_4 = (mean(Q1_4[array_idx])/mean(N4_weight[array_idx])\n - 2.*((mean(Q1_2[array_idx])/mean(N2_weight[array_idx]))**2.))\n C_1_2 = mean(Q1_2[array_idx])/mean(N2_weight[array_idx])\n if C_1_4 < 0. and C_1_2 > 0.:\n v1_4 = (-C_1_4)**0.25\n v1_2 = sqrt(C_1_2)\n r1_array[iev] = v1_4/(v1_2 + 1e-15)\n F1_array[iev] = sqrt((v1_2**2. - v1_4**2.)\n /(v1_2**2. + v1_4**2. + 1e-15))\n\n C_2_4 = (mean(Q2_4[array_idx])/mean(N4_weight[array_idx])\n - 2.*((mean(Q2_2[array_idx])/mean(N2_weight[array_idx]))**2.))\n C_2_2 = mean(Q2_2[array_idx])/mean(N2_weight[array_idx])\n if C_2_4 < 0. and C_2_2 > 0.:\n v2_4 = (-C_2_4)**0.25\n v2_2 = sqrt(C_2_2)\n r2_array[iev] = v2_4/v2_2\n F2_array[iev] = sqrt((v2_2**2. - v2_4**2.)\n /(v2_2**2. + v2_4**2. + 1e-15))\n\n C_3_4 = (mean(Q3_4[array_idx])/mean(N4_weight[array_idx])\n - 2.*((mean(Q3_2[array_idx])/mean(N2_weight[array_idx]))**2.))\n C_3_2 = mean(Q3_2[array_idx])/mean(N2_weight[array_idx])\n if C_3_4 < 0. and C_3_2 > 0.:\n v3_4 = (-C_3_4)**0.25\n v3_2 = sqrt(C_3_2)\n r3_array[iev] = v3_4/v3_2\n F3_array[iev] = sqrt((v3_2**2. - v3_4**2.)\n /(v3_2**2. + v3_4**2. 
+ 1e-15))\n\n r1_mean = mean(r1_array)\n r1_err = sqrt((nev - 1.)/nev*sum((r1_array - r1_mean)**2.))\n r2_mean = mean(r2_array)\n r2_err = sqrt((nev - 1.)/nev*sum((r2_array - r2_mean)**2.))\n r3_mean = mean(r3_array)\n r3_err = sqrt((nev - 1.)/nev*sum((r3_array - r3_mean)**2.))\n\n F1_mean = mean(F1_array)\n F1_err = sqrt((nev - 1.)/nev*sum((F1_array - F1_mean)**2.))\n F2_mean = mean(F2_array)\n F2_err = sqrt((nev - 1.)/nev*sum((F2_array - F2_mean)**2.))\n F3_mean = mean(F3_array)\n F3_err = sqrt((nev - 1.)/nev*sum((F3_array - F3_mean)**2.))\n\n results = [r1_mean, r1_err, F1_mean, F1_err,\n r2_mean, r2_err, F2_mean, F2_err,\n r3_mean, r3_err, F3_mean, F3_err]\n f = open(outputFileName, 'w')\n f.write(\"# n vn{4}/vn{2} (vn{4}/vn{2})_err Fn Fn_err\\n\")\n f.write(\"# Fn = sqrt((vn{2}^2 - vn{4}^2)/(vn{2}^2 + vn{4}^2))\\n\")\n for i in range(1, 4):\n f.write(\"%d %.10e %.10e %.10e %.10e\\n\"\n % (i, results[4*i - 4], results[4*i - 3],\n results[4*i - 2], results[4*i-1]))\n f.close()\n return", "def connected((e,r)):\n \n # Deal with the middle case so we don't divide by zero\n if r==0: return [(1,1),(2,1),(3,1),(4,1),(5,1),(0,1)]\n # If the input is impossible, return nothing to prune the branch (shouldn't\n # happen)\n if e>=6*r: return []\n connected=[]\n mult=e//r\n rem=e % r\n #Going sideways\n toAdd=((6*r-1,r) if e==0 else (e-1,r))\n connected.append(toAdd)\n toAdd=((0,r) if e==6*r-1 else (e+1,r))\n connected.append(toAdd)\n #Going inward\n toAdd=( (0,r-1)if mult==5 and rem==r-1 else (mult*(r-1)+rem,r-1) )\n connected.append(toAdd)\n if rem!=0:\n connected.append((mult*(r-1)+rem-1,r-1))\n\n #Going outward\n if r<nLayers-1:\n connected.append((mult*(r+1)+rem,r+1))\n connected.append((mult*(r+1)+rem+1,r+1))\n if rem==0: # only case where negatives could result\n if mult>0: connected.append( (mult*(r+1)-1,r+1))\n else: connected.append( (6*(r+1)-1,r+1))\n \n return connected", "def test_exact_supercontrolled_decompose_phase_1_use_random(self, seed):\n state = np.random.default_rng(seed)\n basis_k1 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n basis_k2 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n basis_phase = state.random() * 2 * np.pi\n basis_b = state.random() * np.pi / 4\n basis_unitary = np.exp(1j * basis_phase) * basis_k1 @ Ud(np.pi / 4, basis_b, 0) @ basis_k2\n decomposer = TwoQubitBasisDecomposer(UnitaryGate(basis_unitary))\n\n tgt_k1 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n tgt_k2 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n tgt_phase = state.random() * 2 * np.pi\n\n tgt_unitary = np.exp(1j * tgt_phase) * tgt_k1 @ Ud(np.pi / 4, basis_b, 0) @ tgt_k2\n self.check_exact_decomposition(tgt_unitary, decomposer, num_basis_uses=1)", "def EM5(Type=\"DFA\"):\n R21, R22, R23 = state('R21'), state('R22'), state('R23')\n for i in range(1, 7):\n R21.transit[str(i)] = R21\n R22.transit[str(i)] = R22\n R23.transit[str(i)] = R23\n R21.transit['2'] = R22\n R22.transit['2'] = R23\n R23.transit['2'] = R22\n if Type == \"pDFA\":\n R2 = pDFA('R2', list('123456'), [R21, R22, R23], R21, [R23])\n else:\n R2 = DFA('R2', list('123456'), [R21, R22, R23], R21, [R23])\n if (SIZEOF):\n EM_size[\"EM5\"] = asizeof.asizeof(R2)\n return R2", "def fourth_poly(a, b, c, d, e):\n return lambda z: a*z**4 + b*z**3 + c*z**2 + d*z + e", "def refugia_adj_5_simsplit_4epochs_iter3 (params, ns):\n #28 parameters \n nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, 
nu1d, nu2d, nu3d, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2]) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def test_from_t4_to_topology(self):\n Molecule.from_polymer_pdb(\n get_data_file_path(\"proteins/T4-protein.pdb\")\n ).to_topology()", "def split_full_4epochs_iter5 (params, ns):\n #29 parameters \n nu1x, nuA, nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, m0_12, m0_21, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m2_12, m2_13, m2_21, m2_23, m2_31, m2_32, T0, T1, T2, T3 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T0 (initial split; the definition of this time epoch differentiates this model from refugia_adj_5_simsplit_4epochs)\n nu_T0 = [nu1x, nuA]\n mig0 = numpy.array([[0, m0_12],[m0_21, 0]])\n fs.integrate(nu_T0, T0, m=mig0)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2\n nu_T2 = [nu1b, nu2b, nu3b]\n mig2 = numpy.array([[0, m2_12, m2_13],[m2_21, 0, m2_23], [m2_31, m2_32, 0]]) \n fs.integrate(nu_T2, T2, m=mig2)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T3 = [nu1c, nu2c, nu3c]\n fs.integrate(nu_T3, T3) \n return fs", "def setUp(self):\n self.f1 = uniutil.polynomial(enumerate([3, 6, 81, 1]), Z)\n self.f2 = uniutil.polynomial(enumerate([1, 81, 6, 3]), Z)\n self.f3 = uniutil.polynomial(enumerate([37, 6, 18, 1]), Z)\n self.f4 = uniutil.polynomial(enumerate([91, 7, 14, 1]), Z)\n # f5 = (x - 6)(x - 5)...x(x + 1)(x + 2) - 1\n self.f5 = uniutil.polynomial(enumerate([1439, -1368, -1324,\n 1638, -231, -252,\n 114, -18, 1]), Z)", "def refugia_adj_5_simsplit_4epochs_iter2 (params, ns):\n #28 parameters \n nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2]) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## 
Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def TR_algo3(h, vd=2):\n ve = 0\n vd = 2\n p = [0]*N\n for i in range(M-1, -1, -1):\n w = [bit_component(h, i*N+ii) for ii in range(N)]\n #print(i, w)\n w = sum( [wx*2**j for j, wx in enumerate(w)] )\n #print(i, w, gc(w))\n l = gc(w)\n l = T_inv(ve, vd, l)\n for j in range(N):\n p[j] += bit_component(l, j) << i\n ve = ve ^ rotate_left(e(w), vd+1)\n vd = (vd + d(w) + 1) % N\n return p", "def heuristic_connectfour(board, is_current_player_maximizer):\n current_chains = board.get_all_chains(True)\n other_chains = board.get_all_chains(False)\n #print \"current_chains: \",current_chains\n #print \"other_chains: \", other_chains\n current_score = 0\n other_score = 0\n for chain in current_chains:\n if len(chain)>=1:\n current_score+=len(chain)**2\n for chain in other_chains:\n if len(chain)>=1:\n other_score+=len(chain)**2\n score = 10*(other_score - current_score)\n #if not is_current_player_maximizer\n #raise NotImplementedError\n #print \"current_score: \", current_score, \"other_score: \", other_score;\n if is_current_player_maximizer:\n return -score\n else:\n return score", "def rk4(fn, time, state, time_step, *args):\n k1 = time_step*fn(time, state, *args)\n k2 = time_step*fn(time + 0.5 * time_step, state + 0.5 * k1, *args)\n k3 = time_step*fn(time + 0.5 * time_step, state + 0.5 * k2, *args)\n k4 = time_step*fn(time + time_step, state + k3, *args)\n return state + (1/6)*(k1 + 2*k2 + 2*k3 + k4)", "def refugia_adj_5_simsplit_4epochs_iter1 (params, ns):\n #28 parameters \n nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2]) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n 
fs.integrate(nu_T4, T4) \n return fs", "def generate_n4(self):\r\n self.generate_n1()\r\n self.n_4 = (self.n_1+self.n_2+self.n_3) % 7\r\n self.n_4decimal = int(abs(pi)*math.pow(10, self.n_4)) % 10", "def phase_blocks(posBlock, GTblock, RefBlock, FlagB):\n blockSameCount = 0\n blockReverseCount = 0\n GTblockPhase = []\n GTblockReturn = []\n\n for i in range(len(GTblock)):\n GT = GTblock[i]\n RefGT = RefBlock[i]\n if FlagB[i] == \"FV\": # uncertain variants are set to N\n GTblock[i] = ['N', 'N']\n else: # find and count cases when phased genotype is consistent/inconsistent with parental genotypes\n GTphase = phase_state(GT, RefGT)\n if GTphase == 'same':\n blockSameCount += 1\n GTblockPhase.append('same')\n elif GTphase == 'reverse':\n blockReverseCount += 1\n GTblockPhase.append('reverse')\n\n # find prevalent phase \n if all_same(GTblockPhase) and (len(GTblockPhase) >= 2): # absolutely consistent with parental genotypes\n if GTblockPhase[0] == ['same']:\n RSratio = 1.0\n else:\n RSratio = 0.0\n RSratio = 0.0\n elif GTblockPhase == []: # phase unknown\n RSratio = 'NA'\n else:\n RSratio = float(blockSameCount)/float(blockSameCount+blockReverseCount) # proportion of 'same' phasing state in block strings\n\n # define the block phase and produce output\n if (RSratio == 'NA') or (RSratio < 0.90 and RSratio > 0.10): # discard block that have > 90% of inconsistency with parental reference genotypes, or \n for j in range(len(GTblock)):\n posBlockPrint = posBlock[j]\n GTblockPrint1 = 'N'\n GTblockPrint2 = 'N'\n GTblockReturn.append([posBlockPrint[0], posBlockPrint[1], GTblockPrint1, GTblockPrint2])\n else: # phase according to the prevalent state\n # find prevalent state\n phaseStateNumber = max(map(GTblockPhase.count, GTblockPhase))\n GTblockDefinedPahse = list(set( i for i in GTblockPhase if GTblockPhase.count(i) == phaseStateNumber ))\n if len(GTblockDefinedPahse) == 1: # check if one state is prevalent\n if GTblockDefinedPahse == ['same']:\n phaseState = [0,1]\n else:\n phaseState = [1,0]\n for j in range(len(GTblock)):\n GT = GTblock[j]\n posBlockPrint = posBlock[j]\n GTblockPrint1 = GT[phaseState[0]]\n GTblockPrint2 = GT[phaseState[1]]\n GTblockReturn.append([posBlockPrint[0], posBlockPrint[1], GTblockPrint1, GTblockPrint2])\n else: # if there is conflict in phasing state, set to Ns. 
It usually applies for blocks with less then 10 position overlaps with parental reference.\n for j in range(len(GTblock)):\n posBlockPrint = posBlock[j]\n GTblockPrint1 = 'N'\n GTblockPrint2 = 'N'\n GTblockReturn.append([posBlockPrint[0], posBlockPrint[1], GTblockPrint1, GTblockPrint2])\n phaseState = [0,1]\n\n return(GTblockReturn, RSratio)", "def fifthSubIter(validateMatrix):\n assert np.ndim(validateMatrix) == 3\n fourthTransition = _rot3D90(validateMatrix, 'y', 3)\n listedMatrix = list(np.reshape(fourthTransition, 27))\n del(listedMatrix[13])\n val1 = _getTempsDelexpression(listedMatrix)\n # str1 = ''.join(str(e) for e in listedMatrix)\n return val1, listedMatrix", "def test_Q4_stiffness_2dof(self):\n material = FEMOL.materials.IsotropicMaterial(E=210e6, mu=0.3, rho=1)\n thickness = 0.0250\n C = material.plane_tensor(thickness)\n element_points = np.array([[0, 0], [0.5, 0], [0.5, 0.25], [0, 0.25]])\n element = FEMOL.elements.Q4(element_points, N_dof=2)\n\n self.assertTrue((np.abs(np.sum(element.Ke(C) - FEMOL.test_utils.reference_2dof_stiffness_matrix())) < 1000))", "def ramp7(params, phase, args=dict(n=5, guess=[1, 0.034, 0.35, 0.005, 0.35])):\n # 2013-12-07 14:08 IJMC: Created.\n\n if params[4]>=phase.min():\n params[4] = phase.min() - np.diff(phase).mean()/1e6\n \n return params[0] * (1. + params[1] * (phase - 0.5) + \\\n params[2] * (phase - 0.5)**2 + \\\n params[3] * np.log(phase - params[4]))", "def test_path4():\n path = [\n (0, 0, 1),\n [('A', 3, 0)],\n (0, 1, 1),\n [('A', 2, 0)],\n (np.pi/2, 1, 1),\n (0, 1, 1),\n (np.pi/2, 1, 1),\n (0, 1, 1),\n (np.pi/2, 1, 1),\n (0, 1, 1),\n (np.pi/2, 1, 1),\n ] * 4\n execute_path(path,True)", "def processPhaseSequence(self):\n phasesInRing1, phasesInRing2 = ([] for i in range(2))\n\n [phasesInRing1.append(index+1) for index, value in enumerate(self.phaseDurationList) if value > 0.0 and index < 4]\n [phasesInRing2.append(index+1) for index, value in enumerate(self.phaseDurationList) if value > 0.0 and index >= 4]\n\n if len(phasesInRing1) > 0 and len(phasesInRing2) > 0:\n self.phaseSequenceInRing1.extend(phasesInRing1)\n self.phaseSequenceInRing2.extend(phasesInRing2)\n\n self.processPhaseHeight(phasesInRing1, phasesInRing2)", "def five2four_tiling_strategy(tensor, c_value=None, expansion=None):\n strategy = list()\n if c_value is None:\n strategy = ct_util.create_template(tensor=tensor,\n template=ct_util.TileTemplate.NC1HWC0)\n elif not shape_is_dynamic(tensor):\n c_value = 16 if c_value < 16 else c_value\n node_n = ct_util.create_constraint_on_tensor(tensor=tensor,\n values=1,\n constraints=ct_util.TileConstraint.FACTOR,\n tensor_pos=0)\n node_c1 = ct_util.create_constraint_on_tensor(tensor=tensor,\n values=\"FULL\",\n constraints=ct_util.TileConstraint.MAX,\n tensor_pos=1)\n node_c0 = ct_util.create_constraint_on_tensor(tensor=tensor,\n values=c_value,\n constraints=ct_util.TileConstraint.FACTOR,\n tensor_pos=4)\n strategy = node_n + node_c1 + node_c0\n if expansion:\n strategy.append(ct_util.create_constraint_on_tensor(tensor=tensor,\n values=expansion,\n constraints=ct_util.TileConstraint.SET_EXPANSION)[0])\n if shape_is_dynamic(tensor):\n # axis should be full tiled due to cast operator\n strategy.append(ct_util.modify_common_constraints(\n value=0.85, constraint=ct_util.TileConstraint.SET_MEM_RATIO))\n return strategy", "def connected_four(bit_board):\n\n # Horizontal check\n m = bit_board & (bit_board >> 7)\n if m & (m >> 14):\n return True\n # Diagonal \\\n m = bit_board & (bit_board >> 6)\n if m & (m >> 12):\n return True\n # 
Diagonal /\n m = bit_board & (bit_board >> 8)\n if m & (m >> 16):\n return True\n # Vertical\n m = bit_board & (bit_board >> 1)\n if m & (m >> 2):\n return True\n # Nothing found\n return False", "def move_length_5(self, move, new_state):\n # First consider the move on 6 corners\n new1_state = new_state\n if move in [\"A\", \"B\", \"O\", \"U\", \"T\", \"Y\"]:\n corners = [[\"A\", 1, 2, 2, 5, 9, 14, 0, 3, 7, 12, 18, 24, 17],\n [\"B\", 0, 2, 4, 8, 13, 19, 12, 3, 6, 10, 15, 20, 1],\n [\"O\", 20, 13, 9, 5, 2, 0, 0, 15, 16, 17, 18, 19, 10],\n [\"U\", 14, 13, 21, 22, 23, 24, 11, 1, 3, 6, 10, 15, 1],\n [\"T\", 24, 9, 1, 4, 8, 13, 12, 14, 15, 16, 17, 18, 10],\n [\"Y\", 19, 9, 20, 21, 22, 23, 11, 0, 3, 7, 12, 18, 17]]\n for i in corners:\n new1_state = self.loop51(move, new_state, i)\n\n if move in [\"C\", \"E\", \"J\", \"N\", \"V\", \"X\"]:\n # then consider the move on the middle1 of each side\n middle1 = [[\"C\", 3, 4, 4, 0, 5, 9, 14, 0, 6, 11, 17, 23, 16],\n [\"E\", 2, 3, 4, 1, 8, 13, 19, 12, 7, 11, 16, 21, 3],\n [\"J\", 15, 21, 14, 10, 11, 12, 13, 8, 0, 2, 5, 14, 0],\n [\"N\", 18, 23, 7, 1, 4, 8, 19, 12, 9, 10, 11, 12, 8],\n [\"V\", 9, 15, 14, 20, 22, 23, 24, 11, 4, 7, 11, 16, 3],\n [\"X\", 13, 18, 7, 20, 21, 22, 24, 11, 2, 6, 11, 17, 16]]\n for i in middle1:\n new1_state = self.loop52(move, new_state, i)\n return StonehengeState(not self.p1_turn, new1_state.length,\n new1_state.letters, new1_state.claim)", "def scoreSevenHand(hand):\n handCombos = list(itertools.combinations(hand, 5))\n return max(scoreFiveHand(hc) for hc in handCombos)", "def rk4(x,t,tau,derivsRK,param): #couldn't get it to import right so I just copy pasted.\r\n \r\n half_tau = 0.5*tau\r\n F1 = derivsRK(x,t,param) \r\n t_half = t + half_tau\r\n xtemp = x + half_tau*F1\r\n F2 = derivsRK(xtemp,t_half,param) \r\n xtemp = x + half_tau*F2\r\n F3 = derivsRK(xtemp,t_half,param)\r\n t_full = t + tau\r\n xtemp = x + tau*F3\r\n F4 = derivsRK(xtemp,t_full,param)\r\n xout = x + tau/6.*(F1 + F4 + 2.*(F2+F3))\r\n return xout" ]
[ "0.56203735", "0.5353156", "0.5339077", "0.53312033", "0.52918285", "0.52905166", "0.5248838", "0.52097756", "0.5169619", "0.51692957", "0.51207316", "0.50766826", "0.5064046", "0.5053413", "0.50450885", "0.50349313", "0.503351", "0.49939486", "0.49711737", "0.49670827", "0.49572968", "0.49537754", "0.4938196", "0.4930792", "0.48999292", "0.48927122", "0.48653322", "0.48583114", "0.4843019", "0.4833436", "0.48266596", "0.482073", "0.48179618", "0.48140004", "0.47986665", "0.47815862", "0.47655135", "0.47478545", "0.47435588", "0.47428775", "0.4739191", "0.4734199", "0.47306135", "0.47219113", "0.47215816", "0.47134975", "0.46744117", "0.46720698", "0.46701384", "0.46647158", "0.46564734", "0.46488965", "0.46369323", "0.4636929", "0.46368048", "0.46321726", "0.46301475", "0.46299183", "0.4628861", "0.46150506", "0.4591878", "0.45859563", "0.45846188", "0.45730242", "0.4567985", "0.45594028", "0.454841", "0.45391965", "0.45350417", "0.45330313", "0.4530674", "0.4524836", "0.45230138", "0.45217252", "0.45174968", "0.45133215", "0.45063272", "0.44904557", "0.44872102", "0.4483426", "0.44816476", "0.44769907", "0.44757685", "0.44738153", "0.44677466", "0.44584593", "0.4455179", "0.44500634", "0.44490692", "0.44381896", "0.44348174", "0.4434582", "0.44318202", "0.44308415", "0.44266048", "0.44251862", "0.44229618", "0.4422565", "0.44219688", "0.4413741" ]
0.58114266
0
phase1 stages the centers on sides L and R
phase2 stages the centers on sides F and B and puts the LR centers into one of 495 states that can be
    solved without L L' R R' ... this is prep work for phase 3

TODO this needs more work:
BLBFRUFRDDFBUULBRLBRRLDLDLFURFLUBUDRRRDDFDFBBLUFRUFFBBFBLLLDBDFBDBLFDUUFRFBLDUDDURFDRBBDFUUFUBFBDLULDLRRUDFDFULLLUUBUDRLURLBBDURFRBULBRFRBRDRRULDFLFLR
results in "5x5x5 edge swaps are odd, cannot pair edges"
def group_centers_phase1_and_2(self) -> None:
    self.rotate_U_to_U()
    self.rotate_F_to_F()

    if self.centers_staged():
        return

    original_state = self.state[:]
    original_solution = self.solution[:]
    tmp_solution_len = len(self.solution)

    # find multiple phase1 solutions
    phase1_solutions = self.lt_LR_centers_stage.solutions_via_c(solution_count=100)
    pt_state_indexes = []
    pt_state_indexes_LR_centers_special = []
    phase2_pt_state_indexes_to_phase1_solution = {}
    logger.info(f"found {len(phase1_solutions)} phase1 solutions")

    # find the phase2 solution for each phase1 solution
    for phase1_solution, (pt0_state, pt1_state, pt2_state, pt3_state, pt4_state) in phase1_solutions:
        self.state = original_state[:]
        self.solution = original_solution[:]

        for step in phase1_solution:
            self.rotate(step)

        # stage the LR centers
        phase2_pt_state_indexes = tuple([pt.state_index() for pt in self.lt_FB_centers_stage.prune_tables])
        pt_state_indexes.append(phase2_pt_state_indexes)
        phase2_pt_state_indexes_to_phase1_solution[phase2_pt_state_indexes] = phase1_solution

        # stage the LR centers and put them into one of 495 states solveable with L L' R R'
        phase2_pt_state_indexes = tuple(
            [pt.state_index() for pt in self.lt_FB_centers_stage_LR_centers_special.prune_tables]
        )
        pt_state_indexes_LR_centers_special.append(phase2_pt_state_indexes)
        phase2_pt_state_indexes_to_phase1_solution[phase2_pt_state_indexes] = phase1_solution

    self.state = original_state[:]
    self.solution = original_solution[:]

    # stage the FB centers
    phase2_solutions = self.lt_FB_centers_stage.solutions_via_c(pt_states=pt_state_indexes, solution_count=1)
    phase2_solution = phase2_solutions[0][0]

    # stage the FB centers and put LR centers into one of 495 states solveable with L L' R R'
    phase2_solutions_lr_centers_special = self.lt_FB_centers_stage_LR_centers_special.solutions_via_c(
        pt_states=pt_state_indexes_LR_centers_special, solution_count=1
    )
    phase2_solution_lr_centers_special = phase2_solutions_lr_centers_special[0][0]

    # if we can put the LR centers into one of 495 states without adding to the move count, make it so
    if len(phase2_solution_lr_centers_special) <= len(phase2_solution):
        min_phase2_solution, (
            pt0_state,
            pt1_state,
            pt2_state,
            pt3_state,
            pt4_state,
        ) = phase2_solutions_lr_centers_special[0]
        min_phase1_solution = phase2_pt_state_indexes_to_phase1_solution[pt0_state, pt1_state, pt2_state]
    else:
        min_phase2_solution, (pt0_state, pt1_state, pt2_state, pt3_state, pt4_state) = phase2_solutions[0]
        min_phase1_solution = phase2_pt_state_indexes_to_phase1_solution[pt0_state, pt1_state]

    logger.info(
        f"phase2 solution length {len(phase2_solution)}, phase2_lr_centers_special solution length {len(phase2_solution_lr_centers_special)}"
    )

    for step in min_phase1_solution:
        self.rotate(step)

    self.print_cube_add_comment("LR centers staged", tmp_solution_len)
    tmp_solution_len = len(self.solution)

    for step in min_phase2_solution:
        self.rotate(step)

    self.print_cube_add_comment("UD FB centers staged", tmp_solution_len)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def chain_corrections():\n \n #read the files\n sample_4m=read_sample(map_files('sample_4m'))\n empty_cell_4m=read_sample(map_files('empty_cell_4m'))\n empty_4m=read_sample(map_files('empty_4m'))\n transmission_sample_cell_4m=read_sample(map_files('trans_sample_4m'))\n transmission_empty_cell_4m=read_sample(map_files('trans_empty_cell_4m'))\n blocked_beam_4m=read_sample(map_files('blocked_4m'))\n sensitivity=read_div(map_files('div'))\n #mask=read_sample(map_files('mask'))\n \n #normalize the monitors\n \n sample_4m_norm=monitor_normalize(sample_4m)\n empty_cell_4m_norm=monitor_normalize(empty_cell_4m)\n transmission_sample_cell_4m_norm=monitor_normalize(transmission_sample_cell_4m)\n transmission_empty_cell_4m_norm=monitor_normalize(transmission_empty_cell_4m)\n empty_4m_norm=monitor_normalize(empty_4m)\n blocked_beam_4m_norm=monitor_normalize(blocked_beam_4m)\n \n #calculate q\n sample_4m_norm_q=convert_q(sample_4m_norm)\n empty_cell_4m_norm_q=convert_q(empty_cell_4m)\n blocked_beam_4m_norm_q=convert_q(blocked_beam_4m_norm)\n transmission_sample_cell_4m_norm_q=convert_q(transmission_sample_cell_4m_norm)\n transmission_empty_cell_4m_norm_q=convert_q(transmission_empty_cell_4m_norm)\n empty_4m_norm_q=convert_q(empty_4m_norm)\n \n \n print 'converted'\n #convert flatness\n sample_4m_solid=correct_solid_angle(sample_4m_norm_q)\n empty_cell_4m_solid=correct_solid_angle(empty_cell_4m_norm_q)\n blocked_beam_4m_solid=correct_solid_angle(blocked_beam_4m_norm_q)\n transmission_sample_cell_4m_solid=correct_solid_angle(transmission_sample_cell_4m_norm_q)\n transmission_empty_cell_4m_solid=correct_solid_angle(transmission_empty_cell_4m_norm_q)\n empty_4m_solid=correct_solid_angle(empty_4m_norm_q)\n \n \n #calculate transmission\n coord_left=(60,60)\n coord_right=(70,70)\n transmission_sample_cell_4m_rat=generate_transmission(transmission_sample_cell_4m_solid,empty_4m_solid,\n coord_left,coord_right)\n transmission_empty_cell_4m_rat=generate_transmission(transmission_empty_cell_4m_solid,empty_4m_solid,\n coord_left,coord_right)\n print 'Sample transmission= {} (IGOR Value = 0.724)'.format(transmission_sample_cell_4m_rat)\n print 'Empty Cell transmission= {} (IGOR Value = 0.929)'.format(transmission_empty_cell_4m_rat)\n print 'hi'\n \n #Initial Correction -- Not with the sub/mult tools,\n #SAM = sample_4m_solid.data\n #print SAM.x\n #EMP = empty_4m_solid.data\n #print \"EMP: \"\n #print EMP.x\n #BGD = blocked_beam_4m_solid.data\n #print \"BGD\"\n #print BGD.x\n #Tsam = transmission_sample_cell_4m_rat\n #Temp = transmission_empty_cell_4m_rat\n #COR1 = SAM.__sub__(BGD)\n #COR2 = (EMP.__sub__(BGD)).__mul__(Tsam/Temp)\n #COR = COR1.__sub__(COR2)\n #print \"after initial correction: \"\n #print COR.x\n \n SAM = sample_4m_solid\n print SAM.data.x\n EMP = empty_4m_solid\n print \"EMP: \"\n print EMP.data.x\n BGD = blocked_beam_4m_solid\n print \"BGD:\"\n print BGD.data.x\n Tsam = transmission_sample_cell_4m_rat\n Temp = transmission_empty_cell_4m_rat\n print \"COR1:\"\n COR1 = SAM.__sub1__(BGD)\n print COR1.data.x #check=works\n #-----Problems Here-------\n print \"COR2:\"\n COR2 = (EMP.__sub1__(BGD)) #check=works\n print COR2.data.x\n print \"COR3:\"\n #AJJ - __mul__ not working because Tsam and Temp are Measurement instances and not simply floats. 
See above.\n COR3 = COR2.__mul__(Tsam/Temp) #mul not working\n print COR3.data.x\n #COR = COR1.__sub1__(COR2)\n #print \"after initial correction: \"\n #print COR.x\n #COR2 = (EMP.__sub__(BGD)).__mul__(Tsam/Temp)\n #COR = COR1.__sub__(COR2)\n #print \"after initial correction: \"\n #print COR.data.x", "def center_flows(L_wprime, U_wprime, L_w3, U_w3, L_overlap, U_overlap):\n # examine every possible point\n current_dist_to_edge = -1\n point = (0,0)\n #print(\"w3 range: [{}, {}]\".format(L_w3, U_w3))\n #print(\"w' range: [{}, {}]\".format(L_wprime, U_wprime))\n #print(\"overlap range: [{},{}]\".format(L_overlap, U_overlap))\n for y in range(L_w3, U_w3 + 1):\n #print(\"y={}\".format(y))\n LH_bound = max(L_wprime, L_overlap - y)\n #print(\"LH bound = {}\".format(LH_bound))\n RH_bound = min(U_wprime, U_overlap - y)\n #print(\"RH bound = {}\".format(RH_bound))\n for x in range(LH_bound, RH_bound + 1):\n # w3 UB: 0x + 1y - U_w3 = 0\n # w3 LB: 0x + 1y - L_w3 = 0\n # wprime UB: 1x + 0y - U_wprime\n # wprime LB: 1x + 0y - L_wprime\n # wprime + w3 UB: 1x + 1y - U_wprime,wk\n # wprime + w3 LB: 1x + 1y - L_wprime,wk\n dist_to_edge = min(distance_point_to_line(x, y, 0, -1, U_w3), #0x-1y+U_w3=0\n distance_point_to_line(x, y, 0, -1, L_w3), #0x-1y+L_w3=0\n # -1x + 0y + U_wprime = 0\n distance_point_to_line(x, y, -1, 0, U_wprime),\n # -1x + 0y + L_wprime = 0\n distance_point_to_line(x, y, -1, 0, L_wprime),\n # -1x - 1y + U_overlap = 0\n distance_point_to_line(x, y, -1, -1, U_overlap),\n # -1 x - y + L_overlap = 0\n distance_point_to_line(x, y, -1, -1, L_overlap))\n if dist_to_edge > current_dist_to_edge:\n #print(\"At point ({},{}), distance to edge increased from {} to {}.\"\\\n # .format(x,y,current_dist_to_edge,dist_to_edge))\n current_dist_to_edge = dist_to_edge\n point = (x,y)\n return(point)", "def frame3dlin_Kg(E,A1,A2,L,Te1,Te2,R=None):\n Kge1= np.array([\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0],\n [0 , -((3*A2+3*A1)*E)/(5*L**2) , 0 , 0 , 0 , -(A2*E)/(10*L) , 0 , ((3*A2+3*A1)*E)/(5*L**2) , 0 , 0 , 0 , -(A1*E)/(10*L)],\n [0 , 0 , -((3*A2+3*A1)*E)/(5*L**2) , 0 , (A2*E)/(10*L) , 0 , 0 , 0 , ((3*A2+3*A1)*E)/(5*L**2) , 0 , (A1*E)/(10*L) , 0],\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0],\n [0 , 0 , (A2*E)/(10*L) , 0 , -((A2+3*A1)*E)/30 , 0 , 0 , 0 , -(A2*E)/(10*L) , 0 , ((A2+A1)*E)/60 , 0],\n [0 , -(A2*E)/(10*L) , 0 , 0 , 0 , -((A2+3*A1)*E)/30 , 0 , (A2*E)/(10*L) , 0 , 0 , 0 , ((A2+A1)*E)/60],\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0],\n [0 , ((3*A2+3*A1)*E)/(5*L**2) , 0 , 0 , 0 , (A2*E)/(10*L) , 0 , -((3*A2+3*A1)*E)/(5*L**2) , 0 , 0 , 0 , (A1*E)/(10*L)],\n [0 , 0 , ((3*A2+3*A1)*E)/(5*L**2) , 0 , -(A2*E)/(10*L) , 0 , 0 , 0 , -((3*A2+3*A1)*E)/(5*L**2) , 0 , -(A1*E)/(10*L) , 0],\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0],\n [0 , 0 , (A1*E)/(10*L) , 0 , ((A2+A1)*E)/60 , 0 , 0 , 0 , -(A1*E)/(10*L) , 0 , -((3*A2+A1)*E)/30 , 0],\n [0 , -(A1*E)/(10*L) , 0 , 0 , 0 , ((A2+A1)*E)/60 , 0 , (A1*E)/(10*L) , 0 , 0 , 0 , -((3*A2+A1)*E)/30]\n ])\n Kge2= np.array([\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0],\n [0 , ((3*A2+3*A1)*E)/(5*L**2) , 0 , 0 , 0 , (A2*E)/(10*L) , 0 , -((3*A2+3*A1)*E)/(5*L**2) , 0 , 0 , 0 , (A1*E)/(10*L)],\n [0 , 0 , ((3*A2+3*A1)*E)/(5*L**2) , 0 , -(A2*E)/(10*L) , 0 , 0 , 0 , -((3*A2+3*A1)*E)/(5*L**2) , 0 , -(A1*E)/(10*L) , 0],\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0],\n [0 , 0 , -(A2*E)/(10*L) , 0 , ((A2+3*A1)*E)/30 , 0 , 0 , 0 , (A2*E)/(10*L) , 0 , -((A2+A1)*E)/60 , 0],\n [0 , (A2*E)/(10*L) , 0 , 0 , 0 , ((A2+3*A1)*E)/30 , 0 , -(A2*E)/(10*L) , 0 , 0 , 
0 , -((A2+A1)*E)/60],\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0],\n [0 , -((3*A2+3*A1)*E)/(5*L**2) , 0 , 0 , 0 , -(A2*E)/(10*L) , 0 , ((3*A2+3*A1)*E)/(5*L**2) , 0 , 0 , 0 , -(A1*E)/(10*L)],\n [0 , 0 , -((3*A2+3*A1)*E)/(5*L**2) , 0 , (A2*E)/(10*L) , 0 , 0 , 0 , ((3*A2+3*A1)*E)/(5*L**2) , 0 , (A1*E)/(10*L) , 0],\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0],\n [0 , 0 , -(A1*E)/(10*L) , 0 , -((A2+A1)*E)/60 , 0 , 0 , 0 , (A1*E)/(10*L) , 0 , ((3*A2+A1)*E)/30 , 0],\n [0 , (A1*E)/(10*L) , 0 , 0 , 0 , -((A2+A1)*E)/60 , 0 , -(A1*E)/(10*L) , 0 , 0 , 0 , ((3*A2+A1)*E)/30]])\n\n Kg = Kge1*Te1 + Kge2*Te2\n\n if (R is not None):\n RR = scipy.linalg.block_diag(R,R,R,R)\n Kg = np.transpose(RR).dot(Kg.dot(RR))\n\n return Kg", "def analyze_orbit_corrector(OC1, OC2, beamline, phase_beg):\n\n M = np.identity(4)\n OC_parameters = np.zeros(4)\n\n for element in beamline:\n M = np.dot(element.M1, M)\n\n # Since the X and Y are decoupled, we can treat them separately.\n M_x = M[0:2, 0:2]\n M_y = M[2:4, 2:4]\n\n L1 = [[OC1.length/2], [1]]\n L2 = [[OC2.length/2], [1]]\n\n M_OC1 = np.array(OC1.M1)[0:2, 0:2]\n M_OC2 = np.array(OC2.M1)[0:2, 0:2]\n\n # The following part solve the cx_1 and cx_2\n M1_x = np.linalg.multi_dot([M_OC2, M_x, L1])\n M2_x = np.linalg.multi_dot([M_OC2, M_x, M_OC1])\n M_OC_x = np.hstack((M1_x, L2))\n\n OC_parameters[0:2] = -np.linalg.multi_dot([np.linalg.inv(M_OC_x), M2_x, phase_beg[0:2]])\n # The end of the X-part\n\n # The following part solve the cy_1 and cy_2\n M1_y = np.linalg.multi_dot([M_OC2, M_y, L1])\n M2_y = np.linalg.multi_dot([M_OC2, M_y, M_OC1])\n M_OC_y = np.hstack((M1_y, L2))\n\n OC_parameters[2:4] = -np.linalg.multi_dot([np.linalg.inv(M_OC_y), M2_y, phase_beg[2:4]])\n # The end of the Y-part\n\n\n return OC_parameters", "def linear_LS_triangulation(u1, P1, u2, P2):\n A = np.zeros((4, 3))\n b = np.zeros((4, 1))\n\n # Create array of triangulated points\n x = np.zeros((3, len(u1)))\n\n # Initialize C matrices\n C1 = np.array(linear_LS_triangulation_C)\n C2 = np.array(linear_LS_triangulation_C)\n\n for i in range(len(u1)):\n # Derivation of matrices A and b:\n # for each camera following equations hold in case of perfect point matches:\n # u.x * (P[2,:] * x) = P[0,:] * x\n # u.y * (P[2,:] * x) = P[1,:] * x\n # and imposing the constraint:\n # x = [x.x, x.y, x.z, 1]^T\n # yields:\n # (u.x * P[2, 0:3] - P[0, 0:3]) * [x.x, x.y, x.z]^T + (u.x * P[2, 3] - P[0, 3]) * 1 = 0\n # (u.y * P[2, 0:3] - P[1, 0:3]) * [x.x, x.y, x.z]^T + (u.y * P[2, 3] - P[1, 3]) * 1 = 0\n # and since we have to do this for 2 cameras, and since we imposed the constraint,\n # we have to solve 4 equations in 3 unknowns (in LS sense).\n #\n # Build C matrices, to construct A and b in a concise way\n C1[:, 2] = u1[i, :]\n C2[:, 2] = u2[i, :]\n\n # Build A matrix:\n # [\n # [ u1.x * P1[2,0] - P1[0,0], u1.x * P1[2,1] - P1[0,1], u1.x * P1[2,2] - P1[0,2] ],\n # [ u1.y * P1[2,0] - P1[1,0], u1.y * P1[2,1] - P1[1,1], u1.y * P1[2,2] - P1[1,2] ],\n # [ u2.x * P2[2,0] - P2[0,0], u2.x * P2[2,1] - P2[0,1], u2.x * P2[2,2] - P2[0,2] ],\n # [ u2.y * P2[2,0] - P2[1,0], u2.y * P2[2,1] - P2[1,1], u2.y * P2[2,2] - P2[1,2] ]\n # ]\n A[0:2, :] = C1.dot(P1[0:3, 0:3]) # C1 * R1\n A[2:4, :] = C2.dot(P2[0:3, 0:3]) # C2 * R2\n\n # Build b vector:\n # [\n # [ -(u1.x * P1[2,3] - P1[0,3]) ],\n # [ -(u1.y * P1[2,3] - P1[1,3]) ],\n # [ -(u2.x * P2[2,3] - P2[0,3]) ],\n # [ -(u2.y * P2[2,3] - P2[1,3]) ]\n # ]\n b[0:2, :] = C1.dot(P1[0:3, 3:4]) # C1 * t1\n b[2:4, :] = C2.dot(P2[0:3, 3:4]) # C2 * t2\n b *= -1\n\n # Solve for x vector\n 
cv2.solve(A, b, x[:, i:i + 1], cv2.DECOMP_SVD)\n\n return np.transpose(x), np.ones(len(u1), dtype=bool)", "def phase_blocks(posBlock, GTblock, RefBlock, FlagB):\n blockSameCount = 0\n blockReverseCount = 0\n GTblockPhase = []\n GTblockReturn = []\n\n for i in range(len(GTblock)):\n GT = GTblock[i]\n RefGT = RefBlock[i]\n if FlagB[i] == \"FV\": # uncertain variants are set to N\n GTblock[i] = ['N', 'N']\n else: # find and count cases when phased genotype is consistent/inconsistent with parental genotypes\n GTphase = phase_state(GT, RefGT)\n if GTphase == 'same':\n blockSameCount += 1\n GTblockPhase.append('same')\n elif GTphase == 'reverse':\n blockReverseCount += 1\n GTblockPhase.append('reverse')\n\n # find prevalent phase \n if all_same(GTblockPhase) and (len(GTblockPhase) >= 2): # absolutely consistent with parental genotypes\n if GTblockPhase[0] == ['same']:\n RSratio = 1.0\n else:\n RSratio = 0.0\n RSratio = 0.0\n elif GTblockPhase == []: # phase unknown\n RSratio = 'NA'\n else:\n RSratio = float(blockSameCount)/float(blockSameCount+blockReverseCount) # proportion of 'same' phasing state in block strings\n\n # define the block phase and produce output\n if (RSratio == 'NA') or (RSratio < 0.90 and RSratio > 0.10): # discard block that have > 90% of inconsistency with parental reference genotypes, or \n for j in range(len(GTblock)):\n posBlockPrint = posBlock[j]\n GTblockPrint1 = 'N'\n GTblockPrint2 = 'N'\n GTblockReturn.append([posBlockPrint[0], posBlockPrint[1], GTblockPrint1, GTblockPrint2])\n else: # phase according to the prevalent state\n # find prevalent state\n phaseStateNumber = max(map(GTblockPhase.count, GTblockPhase))\n GTblockDefinedPahse = list(set( i for i in GTblockPhase if GTblockPhase.count(i) == phaseStateNumber ))\n if len(GTblockDefinedPahse) == 1: # check if one state is prevalent\n if GTblockDefinedPahse == ['same']:\n phaseState = [0,1]\n else:\n phaseState = [1,0]\n for j in range(len(GTblock)):\n GT = GTblock[j]\n posBlockPrint = posBlock[j]\n GTblockPrint1 = GT[phaseState[0]]\n GTblockPrint2 = GT[phaseState[1]]\n GTblockReturn.append([posBlockPrint[0], posBlockPrint[1], GTblockPrint1, GTblockPrint2])\n else: # if there is conflict in phasing state, set to Ns. 
It usually applies for blocks with less then 10 position overlaps with parental reference.\n for j in range(len(GTblock)):\n posBlockPrint = posBlock[j]\n GTblockPrint1 = 'N'\n GTblockPrint2 = 'N'\n GTblockReturn.append([posBlockPrint[0], posBlockPrint[1], GTblockPrint1, GTblockPrint2])\n phaseState = [0,1]\n\n return(GTblockReturn, RSratio)", "def __init__(self, \n nd = 2, \n goal = np.array([1.0,1.0]),\n state_bound = [[0,1],[0,1]],\n nA = 4,\n action_list = [[0,1],[0,-1],[1,0],[-1,0]],\n<<<<<<< HEAD:archive-code/puddleworld.py\n ngrid = [10.0,10.0],\n maxStep = 40):\n ngrid = [40, 40]\n x_vec = np.linspace(0,1,ngrid[0])\n y_vec = np.linspace(0,1,ngrid[1])\n for x in x_vec:\n for y in y_vec:\n if ~self.inPuddle([x,y]):\n puddle.append([x,y])\n # puddle is a closed loop \n outpuddlepts = np.asarray(puddle)\n \"\"\"\n\n\n # Horizontal wing of puddle consists of \n # 1) rectangle area xch1<= x <=xc2 && ych1-radius <= y <=ych2+radius\n # (xchi,ychi) is the center points (h ==> horizantal)\n # x, y = state[0], state[1]\n xch1, ych1 = 0.3, 0.7\n xch2, ych2 = 0.65, ych1\n radius = 0.1\n\n\n #Vertical wing of puddle consists of \n # 1) rectangle area xcv1-radius<= x <=xcv2+radius && ycv1 <= y <= ycv2\n # where (xcvi,ycvi) is the center points (v ==> vertical)\n xcv1 = 0.45; ycv1=0.4;\n xcv2 = xcv1; ycv2 = 0.8;\n\n # % 2) two half-circle at end edges of rectangle\n \n # POINTS ON HORIZANTAL LINES OF PUDDLE BOUNDARY\n for x in np.arange(xch1,xcv1-radius,self.meshsize[0]/2):\n puddle.append([x,ych1-radius])\n puddle.append([xcv1-radius,ych1-radius])\n \n for x in np.arange(xcv1+radius,xch2,self.meshsize[0]/2):\n puddle.append([x,ych1-radius])\n \n for x in np.arange(xch1,xcv1-radius,self.meshsize[0]/2):\n puddle.append([x,ych1+radius])\n \n puddle.append([xcv1-radius,ych1+radius])\n\n\n for x in np.arange(xcv1+radius,xch2,self.meshsize[0]/2):\n puddle.append([x,ych1+radius])\n\n # POINTS ON VERTICAL LINES OF PUDDLE BOUNDARY\n for y in np.arange(ycv1,ych1-radius,self.meshsize[1]/2):\n puddle.append([xcv1-radius,y])\n \n for y in np.arange(ycv1,ych1-radius,self.meshsize[1]/2):\n puddle.append([xcv1+radius,y])\n \"\"\"\n for y in np.arrange():\n puddle.append([])\n \n for y in np.arrange():\n puddle.append([])\n \"\"\"\n\n # HALF CIRCLES\n ngridTheta = 10\n thetaVec = np.linspace(0,pi,ngridTheta)\n\n for t in thetaVec:\n puddle.append([xch1+radius*np.cos(pi/2+t),ych1+radius*np.sin(pi/2+t)])\n\n for t in thetaVec:\n puddle.append([xch2+radius*np.cos(-pi/2+t),ych2+radius*np.sin(-pi/2+t)])\n\n for t in thetaVec:\n puddle.append([xcv1+radius*np.cos(pi+t),ycv1+radius*np.sin(pi+t)])\n\n for t in thetaVec:\n puddle.append([xcv2+radius*np.cos(t),ycv2+radius*np.sin(t)])\n\n \n outpuddlepts = np.asarray(puddle)\n return outpuddlepts", "def stitch(KPS1, KPS2, H1, H2, match): #---- stich image to previous one\r\n #--- projection image1 from plane to cylindrical ---\r\n total = np.minimum(match.shape[0],100); # total pairing number\r\n bin1 = match[0:total,0].astype(int); # feature no at image 1\r\n R1 = KPS1.keyz[bin1, 0]; # keypoint Y at image 1\r\n C1 = KPS1.keyz[bin1, 1]; # keypoint X at image 1\r\n V1, U1 = pano_tools.project_p2c_points(R1, C1, H1);\r\n #--- image 2 ---\r\n bin2 = match[0:total,1].astype(int); # feature no at image 2\r\n R2 = KPS2.keyz[bin2, 0]; # keypoint Y at image 2\r\n C2 = KPS2.keyz[bin2, 1]; # keypoint X at image 2\r\n Rc2 = H2[0]/2; Rp2= R2 - Rc2; \r\n Cc2 = H2[1]/2; Cp2= C2 - Cc2;\r\n #--- --- \r\n # {phi1,S1,TU1,TV1} = M*M matrix: which is derived by chosen 2 pairs \r\n # 
{phi0,S0,TU0,TV0} = scalar: which is initial guess by removing outlier\r\n # \r\n phi1,S1,TU1,TV1= pano_tools.derive_p2c_formula(U1,V1,Cp2,Rp2);\r\n seq,phi0,S0,TU0,TV0 = pano_tools.remove_ill_matched_pair(phi1,S1,TU1,TV1); \r\n #--- linear regression [not necessary] ---\r\n # U1X = U1[seq]; C2X = C2[seq]; V1X = V1[seq]; R2X = R2[seq]; \r\n # phi0,S0,TU0,TV0,Err= pano_tools.linear_regression(V1X,U1X,R2X,C2X, phi0,S0,TU0,TV0,H2)\r\n H2[3]= phi0; H2[4]= S0; H2[5]= TV0; H2[6]= TU0;", "def combine_phase(laz, raz, grf_lf_ind, grf_rf_ind, hz, acc_hip_z, acc_hip_x, total_accel):\n # reshape for faster computation\n laz = laz.values.reshape(-1, )\n raz = raz.values.reshape(-1, )\n\n # Check and mark rows with missing data\n length = len(laz)\n missing_data = False\n nan_row = []\n if np.isnan(laz).any() or np.isnan(raz).any():\n missing_data = True\n if missing_data:\n nan_row = np.where(np.isnan(laz) | np.isnan(raz))[0]\n finite_row = np.array(list(set(range(length)) - set(nan_row)))\n laz = np.delete(laz, nan_row, )\n raz = np.delete(raz, nan_row, )\n\n # Filter through low-pass filter\n la_magn = filter_data(laz, filt='low', highcut=ct.cutoff_magn, fs=hz)\n ra_magn = filter_data(raz, filt='low', highcut=ct.cutoff_magn, fs=hz)\n\n acc_hip_z = filter_data(acc_hip_z, filt='low', highcut=6)\n acc_hip_x = filter_data(acc_hip_x, filt='low', highcut=40)\n acc_hip = filter_data(total_accel, filt='low', highcut=15)\n\n # Get balance/movement phase and start and end of movement phase for both\n # right and left foot\n lf_ph, lf_sm, lf_em = _body_phase(la_magn, hz)\n rf_ph, rf_sm, rf_em = _body_phase(ra_magn, hz)\n\n _impact_detect(phase=lf_ph,\n start_move=lf_sm,\n end_move=lf_em,\n grf=grf_lf_ind,\n acc_hip_z=acc_hip_z,\n acc_hip_x=acc_hip_x,\n acc_hip=acc_hip) # detect and add impacts\n del lf_sm, lf_em # no use in further computations\n\n _impact_detect(phase=rf_ph,\n start_move=rf_sm,\n end_move=rf_em,\n grf=grf_rf_ind,\n acc_hip_z=acc_hip_z,\n acc_hip_x=acc_hip_x,\n acc_hip=acc_hip) # detect and add impacts\n del rf_sm, rf_em, raz # no use in further computations\n\n # Insert previous value for phase where data needed to predict was missing\n if missing_data:\n phase_lf = np.ones(length).astype(int)\n phase_lf[finite_row] = lf_ph\n phase_rf = np.ones(length).astype(int)\n phase_rf[finite_row] = rf_ph\n for i in nan_row:\n phase_lf[i] = phase_lf[i - 1]\n phase_rf[i] = phase_rf[i - 1]\n else:\n phase_lf, phase_rf = lf_ph, rf_ph\n\n return phase_lf, phase_rf", "def F_trans(self):\n rho_H1 = self.edp_par['rho_H1'].value\n Z_H1 = self.edp_par['Z_H1'].value\n sigma_H1 = self.edp_par['sigma_H1'].value\n rho_H2 = self.edp_par['rho_H2'].value\n Z_H2 = self.edp_par['Z_H2'].value\n sigma_H2 = self.edp_par['sigma_H2'].value\n rho_M = self.edp_par['rho_M'].value\n sigma_M = self.edp_par['sigma_M'].value\n psi = self.edp_par['psi'].value \n common_scale = self.edp_par['common_scale'].value\n \n \n # Make sure Z_H2 > Z_H1. 
If Z_H2 < Z_H1, swap them\n if Z_H1 > Z_H2:\n Z_H1, Z_H2 = Z_H2, Z_H1\n sigma_H1, sigma_H2 = sigma_H2, sigma_H1\n rho_H1, rho_H2 = rho_H2, rho_H1\n \n # Calculate the intermediate variables\n alpha = self.qz*cos(psi) - self.qx*sin(psi)\n Z_CH2 = Z_H1 - sigma_H1\n Z_W = Z_H2 + sigma_H2\n DeltaZ_H = Z_W - Z_CH2\n \n # Calculate the Gaussian part \n FG = -rho_M*sigma_M * exp(-0.5*(alpha*sigma_M)**2)\n FG += 2*rho_H1*sigma_H1 * cos(alpha*Z_H1) * exp(-0.5*(alpha*sigma_H1)**2)\n FG += 2*rho_H2*sigma_H2 * cos(alpha*Z_H2) * exp(-0.5*(alpha*sigma_H2)**2)\n FG *= np.sqrt(2*pi)\n \n # Calculate the strip part\n FS = -2 * sin(alpha*Z_CH2) / alpha\n \n # Calculate the bridging part\n FB = 1 / (alpha + pi/DeltaZ_H)\n FB += 1 / (alpha - pi/DeltaZ_H)\n FB *= sin(alpha*Z_W) + sin(alpha*Z_CH2)\n FB *= 0.5\n FB -= (sin(alpha*Z_W)-sin(alpha*Z_CH2)) / alpha\n \n return common_scale * (FG + FS + FB)", "def processPhaseHeight(self, phasesInRing1, phasesInRing2):\n P11, P12, P21, P22 = ([] for i in range(4))\n phaseHeightDictionary = {}\n\n [P11.append(index+1)for index, value in enumerate(self.phaseDurationList)\n if value > 0.0 and index < 2]\n [P12.append(index+1)for index, value in enumerate(self.phaseDurationList)\n if value > 0.0 and index >= 2 and index < 4]\n [P21.append(index+1)for index, value in enumerate(self.phaseDurationList)\n if value > 0.0 and index >= 4 and index < 6]\n [P22.append(index+1)for index, value in enumerate(self.phaseDurationList)\n if value > 0.0 and index >= 6 and index < 8]\n\n if (len(P11) == len(P21)):\n for index in range(len(P11)):\n if len(P11) > 0:\n phaseHeightDictionary[str(P11[index])] = 10\n\n for index in range(len(P21)):\n if len(P21) > 0:\n phaseHeightDictionary[str(P21[index])] = 10\n\n elif (len(P11) < len(P21)):\n for index in range(len(P11)):\n if len(P11) > 0:\n phaseHeightDictionary[str(P11[index])] = 20\n\n for index in range(len(P21)):\n if len(P21) > 0:\n phaseHeightDictionary[str(P21[index])] = 10\n\n elif (len(P11) > len(P21)):\n for index in range(len(P11)):\n if len(P11) > 0:\n phaseHeightDictionary[str(P11[index])] = 10\n\n for index in range(len(P21)):\n if len(P21) > 0:\n phaseHeightDictionary[str(P21[index])] = 20\n\n if (len(P12) == len(P22)):\n for index in range(len(P12)):\n if len(P12) > 0:\n phaseHeightDictionary[str(P12[index])] = 10\n\n for index in range(len(P22)):\n if len(P22) > 0:\n phaseHeightDictionary[str(P22[index])] = 10\n\n elif (len(P12) < len(P22)):\n for index in range(len(P12)):\n if len(P12) > 0:\n phaseHeightDictionary[str(P12[index])] = 20\n for index in range(len(P22)):\n if len(P22) > 0:\n phaseHeightDictionary[str(P22[index])] = 10\n\n elif (len(P12) > len(P22)):\n for index in range(len(P12)):\n if len(P12) > 0:\n phaseHeightDictionary[str(P12[index])] = 10\n for index in range(len(P22)):\n if len(P22) > 0:\n phaseHeightDictionary[str(P22[index])] = 20\n\n for phase in phasesInRing1:\n for key, value in phaseHeightDictionary.items():\n if int(key) == phase:\n self.phaseHeightInRing1.append(value)\n\n for phase in phasesInRing2:\n for key, value in phaseHeightDictionary.items():\n if int(key) == phase:\n self.phaseHeightInRing2.append(value)", "def monolayer_4band():\n a = 0.222\n ax = 0.438\n ay = 0.332\n theta = 96.79 * (pi / 180)\n phi = 103.69 * (pi / 180)\n\n lat = pb.Lattice(a1=[ax, 0], a2=[0, ay])\n\n h = a * sin(phi - pi / 2)\n s = 0.5 * ax - a * cos(theta / 2)\n lat.add_sublattices(\n ('A', [0, 0, h], 0),\n ('B', [s, 0, 0], 0),\n ('C', [ax/2, ay/2, 0], 0),\n ('D', [ax/2 + s, ay/2, h], 0)\n )\n\n 
lat.register_hopping_energies({\n 't1': -1.22,\n 't2': 3.665,\n 't3': -0.205,\n 't4': -0.105,\n 't5': -0.055\n })\n\n lat.add_hoppings(\n # t1\n ([-1, 0], 'A', 'D', 't1'),\n ([-1, -1], 'A', 'D', 't1'),\n ([ 0, 0], 'B', 'C', 't1'),\n ([ 0, -1], 'B', 'C', 't1'),\n # t2\n ([ 0, 0], 'A', 'B', 't2'),\n ([ 0, 0], 'C', 'D', 't2'),\n # t3\n ([ 0, 0], 'A', 'D', 't3'),\n ([ 0, -1], 'A', 'D', 't3'),\n ([ 1, 1], 'C', 'B', 't3'),\n ([ 1, 0], 'C', 'B', 't3'),\n # t4\n ([ 0, 0], 'A', 'C', 't4'),\n ([ 0, -1], 'A', 'C', 't4'),\n ([-1, 0], 'A', 'C', 't4'),\n ([-1, -1], 'A', 'C', 't4'),\n ([ 0, 0], 'B', 'D', 't4'),\n ([ 0, -1], 'B', 'D', 't4'),\n ([-1, 0], 'B', 'D', 't4'),\n ([-1, -1], 'B', 'D', 't4'),\n # t5\n ([-1, 0], 'A', 'B', 't5'),\n ([ 0, 1], 'A', 'B', 't5'),\n ([ 0, -1], 'A', 'B', 't5'),\n ([-1, 0], 'C', 'D', 't5'),\n ([ 0, 1], 'C', 'D', 't5'),\n ([ 0, -1], 'C', 'D', 't5'),\n )\n\n return lat", "def eo_edges(self):\n logger.info(\"eo_edges called\")\n permutations = []\n original_state = self.state[:]\n original_solution = self.solution[:]\n tmp_solution_len = len(self.solution)\n\n # Build a list of the wing strings at each midge\n wing_strs = []\n\n for _, square_index, partner_index in midges_recolor_tuples_555:\n square_value = self.state[square_index]\n partner_value = self.state[partner_index]\n wing_str = square_value + partner_value\n wing_str = wing_str_map[square_value + partner_value]\n wing_strs.append(wing_str)\n\n # build a list of all possible EO permutations...an even number of edges must be high\n for num in range(4096):\n num = str(bin(num)).lstrip(\"0b\").zfill(12)\n if num.count(\"1\") % 2 == 0:\n permutations.append(list(map(int, num)))\n\n # Put all 2048 starting states in a file and point ida-via-graph\n # at the file so it can solve all of them and apply the one that is the shortest.\n lr_center_stage_states = []\n eo_outer_orbit_states = []\n eo_inner_orbit_states = []\n\n for permutation in permutations:\n must_be_uppercase = []\n must_be_lowercase = []\n self.state = original_state[:]\n\n for wing_str, uppercase in zip(wing_strs, permutation):\n if uppercase:\n must_be_uppercase.append(wing_str)\n else:\n must_be_lowercase.append(wing_str)\n\n # logger.info(\"%s: %s permutation %s\" % (self, index, \"\".join(map(str, permutation))))\n self.edges_flip_orientation(must_be_uppercase, must_be_lowercase)\n\n # build lists of the states that we need to find state_indexes for\n lr_center_stage_states.append(self.lt_phase3_lr_center_stage.state())\n eo_outer_orbit_states.append(self.lt_phase3_eo_outer_orbit.state())\n eo_inner_orbit_states.append(self.lt_phase3_eo_inner_orbit.state())\n\n # now we have a huge list of states to lookup, do a binary search on multiple states at once (this is drastically faster\n # than binary searching for them individually). 
state_index_multiple() will return a dict where the state is the key\n # and the state_index is the value.\n lr_center_stage_eo_inner_orbit_state_indexes = self.lt_phase3_lr_center_stage.state_index_multiple(\n lr_center_stage_states\n )\n eo_outer_orbit_state_indexes = self.lt_phase3_eo_outer_orbit.state_index_multiple(eo_outer_orbit_states)\n eo_inner_orbit_state_indexes = self.lt_phase3_eo_inner_orbit.state_index_multiple(eo_inner_orbit_states)\n\n # build a list of tuples of the state indexes\n pt_state_indexes = []\n for lr_center_stage_eo_inner_orbit_state, eo_outer_orbit_state, eo_inner_orbit_state in zip(\n lr_center_stage_states, eo_outer_orbit_states, eo_inner_orbit_states\n ):\n pt_state_indexes.append(\n (\n lr_center_stage_eo_inner_orbit_state_indexes[lr_center_stage_eo_inner_orbit_state],\n eo_outer_orbit_state_indexes[eo_outer_orbit_state],\n eo_inner_orbit_state_indexes[eo_inner_orbit_state],\n )\n )\n\n self.state = original_state[:]\n self.solution = original_solution[:]\n\n # When solve_via_c is passed pt_state_indexes (2048 lines of states in this case), it will try all 2048 of them\n # to find the state that has the shortest solution.\n self.lt_phase3.solve_via_c(pt_states=pt_state_indexes)\n\n self.print_cube_add_comment(\"edges EOed into high/low groups\", tmp_solution_len)\n self.post_eo_state = self.state[:]\n self.post_eo_solution = self.solution[:]\n\n # re-color the cube so that the edges are oriented correctly so we can\n # pair 4-edges then 8-edges. After all edge pairing is done we will uncolor\n # the cube and re-apply the solution.\n self.edges_flip_orientation(wing_strs, [])\n self.highlow_edges_print()", "def do_BA2(kp_3d, kp_2d, des, comp_list, H, map_3d, map_2d, map_des, map_cam, map_view, my_update, col, col2, my_max, BA=0):\n # Setting the Format of inputs for using BA modules\n camera_params, points_3d, points_2d, camera_ind, points_ind, final_l1, final_l2, low_bound, up_bound, map_des, map_2d = get_things1(kp_3d, kp_2d, des, comp_list, H, map_3d, map_2d, map_des, map_cam, map_view, my_max)\n n_cameras = camera_params.shape[0]\n n_points = points_3d.shape[0]\n n = 9 * n_cameras + 3 * n_points\n m = 2 * points_2d.shape[0]\n # Optimisation Variable\n x0 = np.hstack((camera_params.ravel(), points_3d[:, 0:3].ravel()))\n resx = x0.copy()\n if(BA==1):\n # Standard BA Module\n f0 = fun(x0, n_cameras, n_points, camera_ind, points_ind, points_2d[:,:2], points_2d[:,2])\n A = bundle_adjustment_sparsity(n_cameras, n_points, camera_ind, points_ind)\n t0 = time.time()\n\n res = least_squares(fun, x0, jac_sparsity=A, bounds=(low_bound, up_bound), verbose=2, x_scale='jac', ftol=1e-4, method='trf',\n args=(n_cameras, n_points, camera_ind, points_ind, points_2d[:,:2], points_2d[:,2]))\n t1 = time.time()\n\n resx = res.x\n # Updating the Map with updated points and transformations\n my_min = 0\n my_max = np.max(camera_ind)+1\n H_op = np.zeros((3,4))\n H_op[0:3,0:3] = R.from_rotvec(resx[(my_max-1)*9:(my_max-1)*9+3]).as_matrix()\n H_op[0:3,3] = resx[(my_max-1)*9+3:(my_max-1)*9+6] # Updating the final transformation\n \n final_pts = np.array(resx[my_max*9:]).reshape(-1,3)\n ini_pts = np.array(x0[my_max*9:]).reshape(-1,3)\n map_view = np.vstack((map_view,resx[(my_max-1)*9:(my_max-1)*9+6])) # Updating Transformations in the map\n\n for i in range(my_min,my_max-1):\n map_view[i] = resx[i*9 : i*9+6]\n update_list = []\n count = 0\n count1 = 0\n for i in range(len(final_l1)):\n # Identifying the Map points\n if(final_l2[i]==1):\n update_list.append(final_l1[i])\n 
if(final_l2[i]==0):\n count1 += 1\n err = np.sqrt(np.sum(np.square((final_pts[points_ind[i]] - ini_pts[points_ind[i]]).ravel()))/3)\n map_3d[final_l1[i]] = final_pts[points_ind[i]] # Updating the map points\n if(np.max(map_cam[final_l1[i]])!=my_max-1):\n map_cam[final_l1[i]].append(my_max-1) # Updating the map views\n count +=1\n \n # Adding the Notseen points to the Map\n update_list = np.array(update_list)\n l2 = np.unique(np.sort(update_list))\n if(my_update==1):\n l1 = []\n l2 = []\n new_3d = []\n new_2d = []\n new_cam = []\n new_view = []\n new_des = []\n new_col = []\n l2 = np.unique(np.sort(update_list))\n j = 0\n for i in range(len(kp_2d)):\n if(i == l2[j]):\n j += 1\n if(j==len(l2)):\n j = 0\n else:\n pt = (np.linalg.inv(H_op[0:3,0:3])@(kp_3d[i].T - H_op[:,3]))\n new_3d.append(pt)\n new_2d = []\n new_cam = []\n new_des.append(des[i])\n new_2d.append(kp_2d[i])\n new_cam.append(my_max-1)\n new_col.append(col2[i])\n map_2d.append(new_2d)\n map_cam.append(new_cam)\n\n new_3d = np.array(new_3d)\n new_des = np.array(new_des)\n new_col = np.array(new_col)\n map_3d = np.vstack((map_3d,new_3d))\n map_des = np.vstack((map_des,new_des))\n col = np.vstack((col,new_col))\n\n return H_op, map_3d, map_2d, map_des, map_cam, map_view, col, my_max-1, len(l2)", "def update_chains(self):\r\n _, black_positions, white_positions = self.get_positions()\r\n\r\n self.bfs(black_positions, 1)\r\n self.bfs(white_positions, 2)", "def normal_modes_gHST(R, NL, KL, params, dispersion=[], spin_dir=[], sublattice_labels=[], b='hang', spring='auto',\n pin='auto'):\n try:\n NP, NN = np.shape(NL)\n except:\n '''There is only one particle.'''\n NP = 1\n NN = 0\n\n M1 = np.zeros((2 * NP, 2 * NP))\n M2 = np.zeros((2 * NP, 2 * NP))\n if spring == 'auto':\n spring = params['k'] * params['l'] ** 2 / (params['I3'] * np.abs(params['w3']))\n # If there is more than one particle, and if the speeds vary from particle to particle,\n # then make spring the same length as a dynamical matrix column\n if len(spring) > 0:\n if (abs(spring - spring[0]) > 1e-9).any():\n # The rotation rates vary from particle to particle, so reshape\n spring_new = np.zeros_like(spring)\n dmyi = 0 # a new index ('dummy i')\n for ii in range(NP):\n # Since 2 dof for position of pivot of gHST, double the size\n spring_new[dmyi] = spring[ii]\n spring_new[dmyi + 1] = spring[ii]\n dmyi += 2\n else:\n # the elements are all identical, so just keep the first one\n spring = spring[0]\n\n if pin == 'auto':\n gn = params['Mm'] * params['g']\n pin = params['l'] * gn / (params['I3'] * np.abs(params['w3']))\n # If there is more than one particle, and if the speeds vary from particle to particle,\n # then make pin the same length as a dynamical matrix column\n if len(pin) > 0:\n if (abs(pin - pin[0]) > 1e-9).any():\n # The rotation rates vary from particle to particle, so reshape\n pin_new = np.zeros_like(pin)\n dmyi = 0 # a new index ('dummy i')\n for ii in range(NP):\n # Since 2 dof for position of pivot of gHST, double the size\n pin_new[dmyi] = pin[ii]\n pin_new[dmyi + 1] = pin[ii]\n dmyi += 2\n else:\n # the elements are all identical, so just keep the first one\n pin = pin[0]\n\n m2_shape = np.shape(M2)\n\n if b == 'hang':\n b = np.zeros(NP)\n elif b == 'stand':\n b = np.ones(NP)\n\n if spin_dir == []:\n '''Assume antialigned with a, aligned with body axis 3'''\n spin_dir = np.ones(NP)\n\n print 'Constructing dynamical matrix...'\n for i in range(NP):\n for nn in range(NN):\n\n ni = NL[i, nn] # the number of the gyroscope i is connected to (particle j)\n k = 
KL[i, nn] # true connection?\n\n if len(dispersion) > 1:\n disp = 1. / (1. + dispersion[i])\n else:\n disp = 1.\n\n diffx = R[ni, 0] - R[i, 0]\n diffy = R[ni, 1] - R[i, 1]\n alphaij = 0.\n\n rij_mag = np.sqrt(diffx ** 2 + diffy ** 2)\n\n if k != 0:\n alphaij = np.arctan2(diffy, diffx)\n\n # for periodic systems, KL is -1 for particles on opposing boundaries\n if KL[i, nn] == -1:\n alphaij = (np.pi + alphaij) % (2 * pi)\n\n # What is this for?\n if KL[i, nn] == -2: # will only happen on first or last gyro in a line\n if i == 0 or i == (NP - 1):\n print i, '--> NL=-2 for this particle'\n yy = np.where(KL[i] == 1)\n dx = R[NL[i, yy], 0] - R[NL[i, yy], 0]\n dy = R[NL[i, yy], 1] - R[NL[i, yy], 1]\n al = (np.arctan2(dy, dx)) % (2 * pi)\n alphaij = np.pi - al\n if i == 1:\n alphaij = np.pi - ((90 / 2) * np.pi / 180.)\n else:\n alphaij = - ((90 / 2) * np.pi / 180.)\n\n Cos = np.cos(alphaij)\n Sin = np.sin(alphaij)\n\n if abs(Cos) < 10E-8:\n Cos = 0.0\n\n if abs(Sin) < 10E-8:\n Sin = 0\n\n Cos2 = Cos ** 2\n Sin2 = Sin ** 2\n CosSin = Cos * Sin\n\n # -1 for aligned with a, 1 for aligned with 3.\n # dir factor :== 1/(-1)^c = (-1)^c\n dir_factor = spin_dir[i]\n\n if len(sublattice_labels) > 0:\n if sublattice_labels[i] == 1:\n extra_factor = 1. * del_A_B\n # print self.del_A_B\n elif sublattice_labels[i] == 0:\n extra_factor = 1.\n else:\n extra_factor = 1.\n else:\n extra_factor = 1.\n\n M1[2 * i, 2 * i] += -disp * k * CosSin * ((-1) ** b[i]) * dir_factor # dxi - dxi\n M1[2 * i, 2 * i + 1] += -disp * k * Sin2 * ((-1) ** b[i]) * dir_factor # dxi - dyi\n M1[2 * i, 2 * ni] += disp * k * CosSin * ((-1) ** b[i]) * dir_factor # dxi - dxj\n M1[2 * i, 2 * ni + 1] += disp * k * Sin2 * ((-1) ** b[i]) * dir_factor # dxi - dyj\n\n # (y components)\n M1[2 * i + 1, 2 * i] += disp * k * Cos2 * ((-1) ** b[i]) * dir_factor # dyi - dxi\n M1[2 * i + 1, 2 * i + 1] += disp * k * CosSin * ((-1) ** b[i]) * dir_factor # dyi - dyi\n M1[2 * i + 1, 2 * ni] += -disp * k * Cos2 * ((-1) ** b[i]) * dir_factor # dyi - dxj\n M1[2 * i + 1, 2 * ni + 1] += -disp * k * CosSin * ((-1) ** b[i]) * dir_factor # dyi - dyj\n\n # if i==0:\n # print '\\n --- \\n added M1[2*i+1, 2*i] = ',disp*k*Cos2 *((-1)**b[i]) *dir_factor\n # print 'dir_factor = ', dir_factor\n # print 'k = ', k\n # print 'else =', ((-1)**b[i]) *dir_factor\n\n # pinning/gravitational matrix\n M2[2 * i, 2 * i + 1] = (1.) * disp * dir_factor * extra_factor\n M2[2 * i + 1, 2 * i] = -(1.) * disp * dir_factor * extra_factor\n\n # self.pin_array.append(2*pi*1*extra_factor)\n # Assumes:\n # (-1)**c adot = - spring* (-1)**b SUM{ z x nij*(nij.(dri-drj)) } + pin\n matrix = - (-spring * M1 + pin * M2)\n\n return matrix", "def method1(self):\n cres=0. # Variable for storing Chern number.\n # The U matrices from Fukui's method; storage...\n Ux=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n \n # ... and calculation of U matrices\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.alleigvecs[:,:,ix ,iy ]\n if ix<self.kS.Nx:\n mat2=self.alleigvecs[:,:,ix+1,iy ]\n else:\n mat2=self.alleigvecs[:,:,1 ,iy ]\n if iy<self.kS.Ny:\n mat3=self.alleigvecs[:,:,ix ,iy+1]\n else:\n mat3=self.alleigvecs[:,:,ix ,1 ]\n Ux[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat2)[:self.NL,:self.NL])\n Uy[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat3)[:self.NL,:self.NL])\n \n # Local estimates of Berry curvature; storage ...\n ftempall=np.zeros((self.kS.Nx,self.kS.Ny),complex)\n # ... 
and calculation\n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux[ix,iy]*Uy[ix+1,iy]/Ux[ix,iy+1]/Uy[ix,iy])\n ftempall[ix,iy]=ftemp # ... of local Berry curvature ...\n cres+=ftemp/2./pi/1j # ... and of Berry phase (Chern number).\n\n return cres.real, ftempall", "def triangulate(Kl, Kr, Twl, Twr, pl, pr, Sl, Sr):\r\n #--- FILL ME IN ---\r\n \r\n # Compute baseline (right camera translation minus left camera translation)\r\n Cr = (Twr)[0:3,-1] #left camera translaton\r\n Cl = (Twl)[0:3,-1] #right camera translation\r\n b = (Cr - Cl).reshape(3,1)\r\n \r\n \r\n # Unit vectors projecting from optical center to image plane points.\r\n # Use variables rayl and rayr for the rays.\r\n rayl = Twl[0:3,0:3].dot(inv(Kl)).dot(np.insert(pl,2,1, axis =0))\r\n rayl = rayl/norm(rayl) #convert to unit vector\r\n \r\n rayr = Twr[0:3,0:3].dot(inv(Kr)).dot(np.insert(pr,2,1, axis =0))\r\n rayr = rayr/norm(rayr) #convert to unit vector\r\n \r\n \r\n # Projected segment lengths.\r\n # Use variables ml and mr for the segment lengths.\r\n rLrR = rayl.T.dot(rayr)[0][0]\r\n ml = ((b.T.dot(rayl) - (b.T.dot(rayr))*(rLrR))/(1-rLrR**2))[0][0]\r\n mr = (rLrR*ml - b.T.dot(rayr))[0][0]\r\n \r\n # Segment endpoints.\r\n # User variables Pl and Pr for the segment endpoints.\r\n Pl = Cl.reshape(3,1) + rayl*ml\r\n Pr = Cr.reshape(3,1) + rayr*mr\r\n \r\n # Now fill in with appropriate ray Jacobians. These are \r\n # 3x4 matrices, but two columns are zeros (because the right\r\n # ray direction is not affected by the left image point and \r\n # vice versa).\r\n drayl = np.zeros((3, 4)) # Jacobian left ray w.r.t. image points.\r\n drayr = np.zeros((3, 4)) # Jacobian right ray w.r.t. image points.\r\n \r\n # Add code here...\r\n #rayl = f(x)_l/g(x)_l = r/norm(r). Equation for unit vector provided in the assignment\r\n #drayl = d/dx[f(x)_l/g(x)_l] = ( d/dx[f(x)_l]*g(x)_l - f(x)_l*d/dx[g(x)_l] / [g(x)_l]^2 )\r\n #where x is the image plane points in the left camera ul (i.e pl[0][0]), vl (i.e pl[1][0]), \r\n #and right camera ur (i.e pr[0][0]), vr (i.e pr[1][0])\r\n \r\n #As per equation in the assignment. I.e column vector (c1*u, c2*v, c3)\r\n fxl = Twl[:3,:3].dot(inv(Kl)).dot(np.array([[pl[0][0]],[pl[1][0]],[1]]))\r\n \r\n #f(x)_l = column vector(c1*ul, c2*vl + c3). \r\n #Therefore f(x)_l w.r.t u = f(x)l_u = column vector (c1, 0, 0,)\r\n fxl_u = Twl[:3,:3].dot(inv(Kl)).dot(np.array([[1],[0],[0]]))\r\n #Therefore f(x)_l w.r.t v = f(x)l_v = column vector (0, c2, 0,)\r\n fxl_v = Twl[:3,:3].dot(inv(Kl)).dot(np.array([[0],[1],[0]]))\r\n \r\n #Same math applied as with f(x)_l shown above - only that it is with the right camera\r\n fxr = Twr[:3,:3].dot(inv(Kr)).dot(np.array([[pr[0][0]],[pr[1][0]],[1]]))\r\n fxr_u = Twr[:3,:3].dot(inv(Kr)).dot(np.array([[1],[0],[0]]))\r\n fxr_v = Twr[:3,:3].dot(inv(Kr)).dot(np.array([[0],[1],[0]]))\r\n \r\n #Recall from above that g(x)_l = norm(r)\r\n gxl = norm(fxl)\r\n #g(x)_l wrt to u is; u*c1^2/norm(r). Where u*c1^2 = fxl_u.T.dot(fxl)\r\n # and gxl = norm(r)\r\n gxl_u = fxl_u.T.dot(fxl)/gxl \r\n #g(x)_l wrt to v is; v*c2^2/norm(r). 
Where v*c2^2 = fxl_v.T.dot(fxl)\r\n # and gxl = norm(r) \r\n gxl_v = fxl_v.T.dot(fxl)/gxl\r\n \r\n # same as above except with the right camera\r\n gxr = norm(fxr)\r\n gxr_u = fxr_u.T.dot(fxr)/gxr\r\n gxr_v = fxr_v.T.dot(fxr)/gxr\r\n \r\n #Fill in Jacobian results with results from above \r\n drayl[:,0] = ((fxl_u.dot(gxl) - fxl.dot(gxl_u))/(gxl*gxl)).reshape(3,)\r\n drayl[:,1] = ((fxl_v.dot(gxl) - fxl.dot(gxl_v))/(gxl*gxl)).reshape(3,) \r\n drayr[:,2] = ((fxr_u.dot(gxr) - fxr.dot(gxr_u))/(gxr*gxr)).reshape(3,)\r\n drayr[:,3] = ((fxr_v.dot(gxr) - fxr.dot(gxr_v))/(gxr*gxr)).reshape(3,)\r\n \r\n \r\n \r\n #------------------\r\n \r\n # Compute dml and dmr (partials wrt segment lengths).\r\n # Compute dml and dmr (partials wrt segment lengths).\r\n u = np.dot(b.T, rayl) - np.dot(b.T, rayr)*np.dot(rayl.T, rayr)\r\n v = 1 - np.dot(rayl.T, rayr)**2\r\n\r\n du = (b.T@drayl).reshape(1, 4) - \\\r\n (b.T@drayr).reshape(1, 4)*np.dot(rayl.T, rayr) - \\\r\n np.dot(b.T, rayr)*((rayr.T@drayl) + (rayl.T@drayr)).reshape(1, 4)\r\n \r\n dv = -2*np.dot(rayl.T, rayr)*((rayr.T@drayl).reshape(1, 4) + \\\r\n (rayl.T@drayr).reshape(1, 4))\r\n\r\n m = np.dot(b.T, rayr) - np.dot(b.T, rayl)@np.dot(rayl.T, rayr)\r\n n = np.dot(rayl.T, rayr)**2 - 1\r\n\r\n dm = (b.T@drayr).reshape(1, 4) - \\\r\n (b.T@drayl).reshape(1, 4)*np.dot(rayl.T, rayr) - \\\r\n np.dot(b.T, rayl)@((rayr.T@drayl) + (rayl.T@drayr)).reshape(1, 4)\r\n dn = -dv\r\n\r\n dml = (du*v - u*dv)/v**2\r\n dmr = (dm*n - m*dn)/n**2\r\n\r\n # Finally, compute Jacobian for P w.r.t. image points.\r\n JP = (ml*drayl + rayl*dml + mr*drayr + rayr*dmr)/2\r\n \r\n #--- FILL ME IN ---\r\n \r\n # 3D point.\r\n P = (Pl + Pr)/2\r\n \r\n # 3x3 landmark point covariance matrix (need to form\r\n # the 4x4 image plane covariance matrix first).\r\n M = np.zeros((4,4))\r\n M[0:2,0:2] = Sl\r\n M[2:4,2:4] = Sr\r\n \r\n S = JP.dot(M).dot(JP.T) #as per equation in the assignment\r\n\r\n # Check for correct outputs...\r\n correct = isinstance(Pl, np.ndarray) and Pl.shape == (3, 1) and \\\r\n isinstance(Pr, np.ndarray) and Pr.shape == (3, 1) and \\\r\n isinstance(P, np.ndarray) and P.shape == (3, 1) and \\\r\n isinstance(S, np.ndarray) and S.shape == (3, 3)\r\n\r\n if not correct:\r\n raise TypeError(\"Wrong type or size returned!\")\r\n\r\n return Pl, Pr, P, S", "def contract_tenors(self):\n\n\tself.r_outer_r[:,:,0,1,:] = self.r_outer_r[:,:,0,1,:]/(1. - self.k_dot_r[0,1,:])\n\tself.r_outer_r[:,:,0,2,:] = self.r_outer_r[:,:,0,2,:]/(1. - self.k_dot_r[0,2,:])\n\t\n\tself.r_outer_r[:,:,1,0,:] = self.r_outer_r[:,:,1,0,:]/(1. - self.k_dot_r[1,0,:])\n\tself.r_outer_r[:,:,1,2,:] = self.r_outer_r[:,:,1,2,:]/(1. - self.k_dot_r[1,2,:])\n\t\n\tself.r_outer_r[:,:,2,0,:] = self.r_outer_r[:,:,2,0,:]/(1. - self.k_dot_r[2,0,:])\n\tself.r_outer_r[:,:,2,1,:] = self.r_outer_r[:,:,2,1,:]/(1. 
- self.k_dot_r[2,1,:])\n\n\tself.delta_l = np.zeros((3,3,self.N),dtype=np.complex_)\n \n\tself.delta_l[0,1,:] = get_l(self,0,1)\n\tself.delta_l[1,0,:] = get_l(self,1,0)\n\t\n\tself.delta_l[0,2,:] = get_l(self,0,2)\n\tself.delta_l[2,0,:] = get_l(self,2,0)\n\t\n\tself.delta_l[1,2,:] = get_l(self,1,2)\n\tself.delta_l[2,1,:] = get_l(self,2,1)\n \n\treturn", "def __init__(self, start_t: float, end_t: float, num_time_blocks: int):\n self._num_time_blocks: int = num_time_blocks\n self._num_states: Optional[int] = None\n self._nlps: Dict[int, InteriorPointInterface] = dict() # keys are the time block index (passed into the build_model_for_time_block method\n self._link_forward_matrices: Dict[int, coo_matrix] = dict() # these get multiplied by the primal vars of the corresponding time block\n self._link_backward_matrices: Dict[int, coo_matrix] = dict() # these get multiplied by the primal vars of the corresponding time block\n self._link_forward_coupling_matrices: Dict[int, coo_matrix] = dict() # these get multiplied by the coupling variables\n self._link_backward_coupling_matrices: Dict[int, coo_matrix] = dict() # these get multiplied by the coupling variables\n\n self._primals_lb: BlockVector = BlockVector(self._num_time_blocks + 1)\n self._primals_ub: BlockVector = BlockVector(self._num_time_blocks + 1)\n\n self._ineq_lb: BlockVector = BlockVector(self._num_time_blocks)\n self._ineq_ub: BlockVector = BlockVector(self._num_time_blocks)\n\n self._init_primals: BlockVector = BlockVector(self._num_time_blocks + 1)\n self._primals: BlockVector = BlockVector(self._num_time_blocks + 1)\n self._delta_primals: BlockVector = BlockVector(self._num_time_blocks + 1)\n\n self._init_slacks: BlockVector = BlockVector(self._num_time_blocks)\n self._slacks: BlockVector = BlockVector(self._num_time_blocks)\n self._delta_slacks: BlockVector = BlockVector(self._num_time_blocks)\n\n self._init_duals_eq: BlockVector = BlockVector(self._num_time_blocks)\n self._duals_eq: BlockVector = BlockVector(self._num_time_blocks)\n self._delta_duals_eq: BlockVector = BlockVector(self._num_time_blocks)\n\n self._init_duals_ineq: BlockVector = BlockVector(self._num_time_blocks)\n self._duals_ineq: BlockVector = BlockVector(self._num_time_blocks)\n self._delta_duals_ineq: BlockVector = BlockVector(self._num_time_blocks)\n\n self._init_duals_primals_lb: BlockVector = BlockVector(self._num_time_blocks + 1)\n self._duals_primals_lb: BlockVector = BlockVector(self._num_time_blocks + 1)\n self._delta_duals_primals_lb: BlockVector = BlockVector(self._num_time_blocks + 1)\n\n self._init_duals_primals_ub: BlockVector = BlockVector(self._num_time_blocks + 1)\n self._duals_primals_ub: BlockVector = BlockVector(self._num_time_blocks + 1)\n self._delta_duals_primals_ub: BlockVector = BlockVector(self._num_time_blocks + 1)\n\n self._init_duals_slacks_lb: BlockVector = BlockVector(self._num_time_blocks)\n self._duals_slacks_lb: BlockVector = BlockVector(self._num_time_blocks)\n self._delta_duals_slacks_lb: BlockVector = BlockVector(self._num_time_blocks)\n\n self._init_duals_slacks_ub: BlockVector = BlockVector(self._num_time_blocks)\n self._duals_slacks_ub: BlockVector = BlockVector(self._num_time_blocks)\n self._delta_duals_slacks_ub: BlockVector = BlockVector(self._num_time_blocks)\n\n self._eq_resid: BlockVector = BlockVector(self._num_time_blocks)\n self._ineq_resid: BlockVector = BlockVector(self._num_time_blocks)\n self._grad_objective: BlockVector = BlockVector(self._num_time_blocks + 1)\n self._jac_eq: BlockMatrix = 
BlockMatrix(nbrows=self._num_time_blocks, nbcols=self._num_time_blocks + 1)\n self._jac_ineq: BlockMatrix = BlockMatrix(nbrows=self._num_time_blocks, nbcols=self._num_time_blocks + 1)\n self._kkt: BlockMatrix = BlockMatrix(nbrows=num_time_blocks + 1, nbcols=num_time_blocks + 1)\n self._rhs: BlockVector = BlockVector(nblocks=num_time_blocks + 1)\n\n self._setup(start_t=start_t, end_t=end_t)\n self._setup_block_vectors()\n self._setup_jacs()\n self._setup_kkt_and_rhs_structure()", "def model_prem(r):\n\n\t#- normalised radius\n\tx = r / 6371000.0\n\n\t#- march through the various depth levels -----------------------------------------------------\n\n\t#- upper crust\n\tif (r >= 6356000.0):\n\t\trho = 2.6\n\t\tvpv = 5.8\n\t\tvph = vpv\n\t\tvsv = 3.2\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- lower crust\n\telif (r >= 6346000.6) & (r < 6356000.0):\n\t\trho = 2.9\n\t\tvpv = 6.8\n\t\tvph = vpv\n\t\tvsv = 3.9\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- LID\n\telif (r >= 6291000.0) & (r < 6346000.6):\n\t\trho = 2.6910 + 0.6924 * x\n\t\tvpv = 0.8317 + 7.2180 * x\n\t\tvph = 3.5908 + 4.6172 * x\n\t\tvsv = 5.8582 - 1.4678 * x\n\t\tvsh = -1.0839 + 5.7176 * x\n\t\teta = 3.3687 - 2.4778 * x\n\n\t#- LVZ\n\telif (r >= 6151000.0) & (r < 6291000.0):\n\t\trho = 2.6910 + 0.6924 * x\n\t\tvpv = 0.8317 + 7.2180 * x\n\t\tvph = 3.5908 + 4.6172 * x\n\t\tvsv = 5.8582 - 1.4678 * x\n\t\tvsh = -1.0839 + 5.7176 * x\n\t\teta = 3.3687 - 2.4778 * x\n\n\t#- Transition zone 1\n\telif (r >= 5971000.0) & (r < 6151000.0):\n\t\trho = 7.1089 - 3.8045 * x\n\t\tvpv = 20.3926 - 12.2569 * x\n\t\tvph = vpv\n\t\tvsv = 8.9496 - 4.4597 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Transition zone 2\n\telif (r >= 5771000.0) & (r < 5971000.0):\n\t\trho = 11.2494 - 8.0298 * x\n\t\tvpv = 39.7027 - 32.6166 * x\n\t\tvph = vpv\n\t\tvsv = 22.3512 - 18.5856 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Transition zone 3\n\telif (r >= 5701000.0) & (r < 5771000.0):\n\t\trho = 5.3197 - 1.4836 * x\n\t\tvpv = 19.0957 - 9.8672 * x\n\t\tvph = vpv\n\t\tvsv = 9.9839 - 4.9324 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Lower mantle 1\n\telif (r >= 5600000.0) & (r < 5701000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 29.2766 - 23.6027 * x + 5.5242 * x**2 - 2.5514 * x**3\n\t\tvph = vpv\n\t\tvsv = 22.3459 - 17.2473 * x - 2.0834 * x**2 + 0.9783 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0 \n\n\t#- Lower mantle 2\n\telif (r >= 3630000.0) & (r < 5600000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 24.9520 - 40.4673 * x + 51.4832 * x**2 - 26.6419 * x**3\n\t\tvph = vpv\n\t\tvsv = 11.1671 - 13.7818 * x + 17.4575 * x**2 - 9.2777 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Lower mantle 3\n\telif (r >= 3480000.0) & (r < 3630000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 15.3891 - 5.3181 * x + 5.5242 * x**2 - 2.5514 * x**3\n\t\tvph = vpv\n\t\tvsv = 6.9254 + 1.4672 * x - 2.0834 * x**2 + 0.9783 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Outer core\n\telif (r >= 1221000.5) & (r < 3480000.0):\n\t\trho = 12.5815 - 1.2638 * x - 3.6426 * x**2 - 5.5281 * x**3\n\t\tvpv = 11.0487 - 4.0362 * x + 4.8023 * x**2 - 13.5732 * x**3\n\t\tvph = vpv\n\t\tvsv = 0.0\n\t\tvsh = 0.0\n\t\teta = 1.0\n\n\t#- Inner Core\n\telif (r >= 0.0) & (r < 1221000.5):\n\t\trho = 13.0885 - 8.8381 * x**2\n\t\tvpv = 11.2622 - 6.3640 * x**2\n\t\tvph = vpv\n\t\tvsv = 3.6678 - 4.4475 * x**2\n\t\tvsh = vsv\n\t\teta = 1.0 \n\n\t#- convert to elastic parameters --------------------------------------------------------------\n\n\trho = 1000.0 * 
rho\n\tvpv = 1000.0 * vpv\n\tvph = 1000.0 * vph\n\tvsv = 1000.0 * vsv\n\tvsh = 1000.0 * vsh\n\n\tA = rho * vph**2\n\tC = rho * vpv**2\n\tN = rho * vsh**2\n\tL = rho * vsv**2\n\tF = eta * (A - 2 * L)\n\n\treturn rho, A, C, F, L, N", "def update_variables(self):\n self.dl21 = self.l21-self.l11; self.dl22 = self.l22-self.l12; self.dl23 = self.l23-self.l13;\n self.kappa1, self.phi1, self.seg_len1 = self.configuration_space(self.l11, self.l12, self.l13, self.d, self.n)\n self.kappa2, self.phi2, self.seg_len2 = self.configuration_space(self.dl21, self.dl22, self.dl23, self.d, self.n)\n # aquire transformation matrices and tips for segment 1 and 2\n self.T01_bishop = self.transformation_matrix_bishop(self.kappa1, self.phi1, self.seg_len1)\n self.T12_bishop = self.transformation_matrix_bishop(self.kappa2, self.phi2, self.seg_len2)\n self.T02_bishop = np.matmul(self.T01_bishop, self.T12_bishop)\n self.T01_frenet = self.transformation_matrix_frenet(self.kappa1, self.phi1, self.seg_len1)\n self.T12_frenet = self.transformation_matrix_frenet(self.kappa2, self.phi2, self.seg_len2)\n self.T02_frenet = np.matmul(self.T01_frenet, self.T12_frenet)\n self.tip_vec1 = np.matmul(self.T01_bishop, self.base)[0:3]\n self.tip_vec2 = np.matmul(self.T02_bishop, self.base)[0:3]\n # Frenet frames\n self.normal_vec_frenet1 = self.T01_frenet[0:3, 0]\n self.binormal_vec_frenet1 = self.T01_frenet[0:3, 1]\n self.tangent_vec_frenet1 = self.T01_frenet[0:3, 2]\n self.normal_vec_frenet2 = self.T02_frenet[0:3, 0]\n self.binormal_vec_frenet2 = self.T02_frenet[0:3, 1]\n self.tangent_vec_frenet2 = self.T02_frenet[0:3, 2]\n # Bishop frames\n self.normal_vec_bishop1 = self.T01_bishop[0:3, 0]\n self.binormal_vec_bishop1 = self.T01_bishop[0:3, 1]\n self.tangent_vec_bishop1 = self.T01_bishop[0:3, 2]\n self.normal_vec_bishop2 = self.T02_bishop[0:3, 0]\n self.binormal_vec_bishop2 = self.T02_bishop[0:3, 1]\n self.tangent_vec_bishop2 = self.T02_bishop[0:3, 2]", "def __init__(self,\n num_class=2,\n layer_nums=(3, 5, 5),\n layer_strides=(2, 2, 2),\n num_filters=(128, 128, 256),\n upsample_strides=(1, 2, 4),\n num_upsample_filters=(256, 256, 256),\n num_input_features=128,\n num_anchor_per_loc=2,\n use_groupnorm=False,\n num_groups=32,\n box_code_size=7,\n num_direction_bins=2):\n super(RPN, self).__init__()\n self._num_anchor_per_loc = num_anchor_per_loc\n self._box_code_size=box_code_size\n self._num_class=num_class\n self._num_direction_bins=num_direction_bins\n assert len(layer_nums) == 3\n assert len(layer_strides) == len(layer_nums)\n assert len(num_filters) == len(layer_nums)\n assert len(upsample_strides) == len(layer_nums)\n assert len(num_upsample_filters) == len(layer_nums)\n upsample_strides=[int(i) for i in upsample_strides]\n\n factors = []\n for i in range(len(layer_nums)):\n assert int(np.prod(\n layer_strides[:i + 1])) % upsample_strides[i] == 0\n factors.append(\n np.prod(layer_strides[:i + 1]) // upsample_strides[i])\n assert all([x == factors[0] for x in factors])\n\n # note that when stride > 1, conv2d with same padding isn't\n # equal to pad-conv2d. 
we should use pad-conv2d.\n block2_input_filters = num_filters[0]\n if use_groupnorm:\n BatchNorm2d = change_default_args(\n num_groups=num_groups, eps=1e-3)(GroupNorm)\n else:\n BatchNorm2d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm2d)\n\n self.block1 = Sequential(\n nn.ZeroPad2d(1),\n nn.Conv2d(num_input_features, num_filters[0], 3,\n stride=layer_strides[0],bias=False),\n BatchNorm2d(num_filters[0]),\n nn.ReLU(),)\n for i in range(layer_nums[0]):\n self.block1.add(\n nn.Conv2d(num_filters[0], num_filters[0], 3,padding=1,bias=False))\n self.block1.add(BatchNorm2d(num_filters[0]))\n self.block1.add(nn.ReLU())\n self.deconv1 = Sequential(\n nn.ConvTranspose2d(num_filters[0],num_upsample_filters[0],\n upsample_strides[0],stride=upsample_strides[0],bias=False),\n BatchNorm2d(num_upsample_filters[0]),\n nn.ReLU(),)\n self.block2 = Sequential(\n nn.ZeroPad2d(1),\n nn.Conv2d(block2_input_filters,num_filters[1],3,\n stride=layer_strides[1],bias=False),\n BatchNorm2d(num_filters[1]),\n nn.ReLU(),)\n for i in range(layer_nums[1]):\n self.block2.add(\n nn.Conv2d(num_filters[1], num_filters[1], 3, padding=1,bias=False))\n self.block2.add(BatchNorm2d(num_filters[1]))\n self.block2.add(nn.ReLU())\n self.deconv2 = Sequential(\n nn.ConvTranspose2d(num_filters[1],num_upsample_filters[1],\n upsample_strides[1],stride=upsample_strides[1],bias=False),\n BatchNorm2d(num_upsample_filters[1]),\n nn.ReLU(),)\n self.block3 = Sequential(\n nn.ZeroPad2d(1),\n nn.Conv2d(num_filters[1], num_filters[2], 3, stride=layer_strides[2],bias=False),\n BatchNorm2d(num_filters[2]),\n nn.ReLU(),)\n for i in range(layer_nums[2]):\n self.block3.add(nn.Conv2d(num_filters[2], num_filters[2], 3, padding=1,bias=False))\n self.block3.add(BatchNorm2d(num_filters[2]))\n self.block3.add(nn.ReLU())\n self.deconv3 = Sequential(\n nn.ConvTranspose2d(\n num_filters[2],num_upsample_filters[2],\n upsample_strides[2],stride=upsample_strides[2],bias=False),\n BatchNorm2d(num_upsample_filters[2]),\n nn.ReLU(),)\n\n num_cls = num_anchor_per_loc * num_class\n self.conv_cls = nn.Conv2d(sum(num_upsample_filters), num_cls, 1)\n self.conv_box = nn.Conv2d(sum(num_upsample_filters), num_anchor_per_loc * box_code_size, 1)\n self.conv_dir_cls = nn.Conv2d(sum(num_upsample_filters),num_anchor_per_loc * num_direction_bins, 1)", "def set_up_orbit_correctors(ps_beg, delay, id_slice1, ds_slice, zplot, id_slices, U_core, lambdaref):\n SXSS = Chicane(3.2716, 0.362, 0.830399, delay[0])\n HXSS = Chicane(3.2, 0.3636, 0.5828, delay[1])\n\n OC2 = [CORR08, D1_SXSS, SXSS, D2_SXSS, QUAD09, CORR09]\n OC3 = [CORR15, D1_HXSS, HXSS, D2_HXSS, QUAD16, CORR16]\n\n ps_end1 = beam_transportation(ps_beg, U_core[0])\n\n # ps_end1 is a 4-by-N array. N is the number of macro-particles. 
It is the full\n # 4D phase space distribution at the end of the first undulator section.\n\n # The id of the slice on the axis in the second undulator section\n on_axis_id_U2 = int(id_slice1+delay[0]/ds_slice+ (8*110)*lambdaref/ds_slice) # The last part is slippage\n\n print(on_axis_id_U2)\n\n ps_end_slice1 = beam_property_along_s(ps_end1, id_slices)[0:4, :]\n ps_on_axis_2 = np.ravel(ps_end_slice1[:, on_axis_id_U2])\n\n # print(ps_on_axis_2)\n\n OC2_optimized = analyze_orbit_corrector(OC2[0], OC2[-1], OC2[1:-1], ps_on_axis_2)\n print(OC2_optimized)\n CORR08_new = Orbit_Corrector(OC2[0].length, OC2_optimized[0], OC2_optimized[2])\n CORR09_new = Orbit_Corrector(OC2[-1].length, OC2_optimized[1], OC2_optimized[3])\n\n # The whole U2 with optimized orbit correctors\n U2_new = [CORR08_new] + OC2[1:-1] + [CORR09_new] + U_core[1]\n ps_end2 = beam_transportation(ps_end1, U2_new)\n\n # ps_end2 is a 4-by-N array. N is the number of macro-particles. It is the full\n # 4D phase space distribution at the end of the second undulator section.\n\n # The id of the slice on the axis in the third undulator section\n on_axis_id_U3 = int(id_slice1+(delay[0]+delay[1])/ds_slice +(14*110*lambdaref)/ds_slice) # The last term is the slipage\n\n print(on_axis_id_U3)\n\n ps_end_slice2 = beam_property_along_s(ps_end2, id_slices)[0:4, :]\n ps_on_axis_3 = np.ravel(ps_end_slice2[ :, on_axis_id_U3])\n\n # print(ps_on_axis_3)\n\n OC3_optimized = analyze_orbit_corrector(OC3[0], OC3[-1], OC3[1:-1], ps_on_axis_3)\n print(OC3_optimized)\n CORR15_new = Orbit_Corrector(OC3[0].length, OC3_optimized[0], OC3_optimized[2])\n CORR16_new = Orbit_Corrector(OC3[-1].length, OC3_optimized[1], OC3_optimized[3])\n\n U3_new = [CORR15_new] + OC3[1:-1] + [CORR16_new] + U_core[2]\n\n Undulator_Beamline = U_core[0]+U2_new+U3_new\n\n return Undulator_Beamline", "def forward(self, x): \n pal1_sources = list()\n pal2_sources = list()\n loc_pal1 = list()\n conf_pal1 = list()\n loc_pal2 = list()\n conf_pal2 = list()\n\n # apply vgg up to conv3_3 relu\n for k in range(16):\n x = self.vgg[k](x)\n\n of1 = x\n s = self.L2Normof1(of1)\n pal1_sources.append(s)\n \n # apply vgg up to conv4_3 relu\n for k in range(16, 23):\n x = self.vgg[k](x)\n\n of2 = x\n s = self.L2Normof2(of2)\n pal1_sources.append(s)\n\n # apply vgg up to conv5_3 relu\n for k in range(23, 30):\n x = self.vgg[k](x)\n of3 = x\n s = self.L2Normof3(of3)\n pal1_sources.append(s)\n\n # apply vgg up to fc7\n for k in range(30, len(self.vgg)):\n x = self.vgg[k](x)\n of4 = x\n pal1_sources.append(of4)\n \n # apply extra layers and cache source layer outputs\n for k in range(2):\n x = F.relu(self.extras[k](x), inplace=True)\n of5 = x\n pal1_sources.append(of5)\n for k in range(2, 4):\n x = F.relu(self.extras[k](x), inplace=True)\n of6 = x\n pal1_sources.append(of6)\n\n ## fpn module\n \"\"\"\n lfpn6 = self.fpn_topdown6(of6)\n lfpn5 = self._upsample_product(self.fpn_topdown5(of6), self.fpn_latlayer5(of5))\n lfpn4 = self._upsample_product(self.fpn_topdown4(of5), self.fpn_latlayer4(of4))\n lfpn3 = self._upsample_product(self.fpn_topdown3(of4), self.fpn_latlayer3(of3))\n lfpn2 = self._upsample_product(self.fpn_topdown2(of3), self.fpn_latlayer2(of2))\n lfpn1 = self._upsample_product(self.fpn_topdown1(of2), self.fpn_latlayer1(of1))\n\n\n ef1 = self.fpn_fem3_3(lfpn1)\n ef1 = self.L2Normef1(ef1)\n ef2 = self.fpn_fem4_3(lfpn2)\n ef2 = self.L2Normef2(ef2)\n ef3 = self.fpn_fem5_3(lfpn3)\n ef3 = self.L2Normef3(ef3)\n\n ef4 = self.fpn_fem7(lfpn4)\n ef5 = self.fpn_fem6_2(lfpn5)\n ef6 = self.fpn_fem7_2(lfpn6)\n 
\"\"\"\n\n conv7 = F.relu(self.fpn_topdown[0](of6), inplace=True)\n x = F.relu(self.fpn_topdown[1](conv7), inplace=True)\n conv6 = F.relu(self._upsample_product(x, self.fpn_latlayer[0](of5)), inplace=True)\n\n x = F.relu(self.fpn_topdown[2](conv6), inplace=True)\n convfc7_2 = F.relu(self._upsample_product(x, self.fpn_latlayer[1](of4)), inplace=True)\n\n x = F.relu(self.fpn_topdown[3](convfc7_2), inplace=True)\n conv5 = F.relu(self._upsample_product(x, self.fpn_latlayer[2](of3)), inplace=True)\n\n x = F.relu(self.fpn_topdown[4](conv5), inplace=True)\n conv4 = F.relu(self._upsample_product(x, self.fpn_latlayer[3](of2)), inplace=True)\n\n x = F.relu(self.fpn_topdown[5](conv4), inplace=True)\n conv3 = F.relu(self._upsample_product(x, self.fpn_latlayer[4](of1)), inplace=True)\n\n ef1 = self.fpn_fem[0](conv3)\n ef1 = self.L2Normef1(ef1)\n ef2 = self.fpn_fem[1](conv4)\n ef2 = self.L2Normef2(ef2)\n ef3 = self.fpn_fem[2](conv5)\n ef3 = self.L2Normef3(ef3)\n ef4 = self.fpn_fem[3](convfc7_2)\n ef5 = self.fpn_fem[4](conv6)\n ef6 = self.fpn_fem[5](conv7)\n\n pal2_sources = (ef1, ef2, ef3, ef4, ef5, ef6)\n\n ## first shot \n for (x, l, c) in zip(pal1_sources, self.loc_pal1, self.conf_pal1):\n loc_pal1.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf_pal1.append(c(x).permute(0, 2, 3, 1).contiguous())\n \n ## second shot\n for (x, l, c) in zip(pal2_sources, self.loc_pal2, self.conf_pal2):\n loc_pal2.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf_pal2.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n # first shot\n loc_pal1 = torch.cat([o.view(o.size(0), -1) for o in loc_pal1], 1)\n conf_pal1 = torch.cat([o.view(o.size(0), -1) for o in conf_pal1], 1)\n \n # second shot\n loc_pal2 = torch.cat([o.view(o.size(0), -1) for o in loc_pal2], 1)\n conf_pal2 = torch.cat([o.view(o.size(0), -1) for o in conf_pal2], 1)\n\n if self.phase == 'test':\n # 测试时, 仅使用shot2 的输出\n output = self.detect(\n loc_pal2.view(loc_pal2.size(0), -1, 4),\n self.softmax(conf_pal2.view(conf_pal2.size(0), -1,\n self.num_classes)), # conf preds\n )\n else:\n ## 训练时,使用shot1 和 shot2 的输出\n output = (\n loc_pal1.view(loc_pal1.size(0), -1, 4),\n conf_pal1.view(conf_pal1.size(0), -1, self.num_classes),\n loc_pal2.view(loc_pal2.size(0), -1, 4),\n conf_pal2.view(conf_pal2.size(0), -1, self.num_classes))\n return output", "def postprocessing(net, initial_marking, final_marking, A, B, pairs, loop_one_list):\n label_transition_dict = {}\n for label in loop_one_list:\n label_transition_dict[label] = PetriNet.Transition(label, label)\n net.transitions.add(label_transition_dict[label])\n\n # F L1L\n # Key is specific loop element\n for key, value in A.items():\n if key in B:\n A_without_B = value - B[key]\n B_without_A = B[key] - value\n pair = (A_without_B, B_without_A)\n for pair_try in pairs:\n in_part = pair_try[0]\n out_part = pair_try[1]\n if pair[0].issubset(in_part) and pair[1].issubset(out_part):\n pair_try_place = PetriNet.Place(str(pair_try))\n add_arc_from_to(label_transition_dict[key], pair_try_place, net)\n add_arc_from_to(pair_try_place, label_transition_dict[key], net)\n return net, initial_marking, final_marking", "def forward(self, x):\n # l1\n #print(\"INIT SIZE\", torch.cuda.max_memory_allocated())\n #print(\"L1\")\n #print(\"input\", x.shape)\n e1 = self.ec_init(x)\n #print(\"init\", e1.shape)\n syn1 = self.ec11(e1) # init right - l1\n #print(\"syn1\", syn1.shape)\n #print(\"L2\")\n e2 = self.bilinear(syn1, 32, 32, size=self.sizes[2]) # l1-2\n #print(\"e2\", e2.shape)\n # l2\n syn2 = self.ec22(e2) # right l2 (concat later)\n 
#print(\"syn2\", syn2.shape)\n del e1, e2\n e3 = self.bilinear(syn2, 32, 32, size=self.sizes[3]) # l2-3\n #print(\"L3\")\n #print(\"e3\", e3.shape)\n # l3\n syn3 = self.ec33(e3) # right l3 (concat later)\n #print(\"syn3\", syn3.shape)\n del e3 # delete\n #print(\"L4\")\n e41 = self.bilinear(syn3, 32, 64, size=self.sizes[4]) # l3-l4\n #print(\"e41\", e41.shape)\n\n # l4\n e42 = self.ec441(e41) # right 1 l4\n #print(\"e42\", e42.shape) \n syn4 = self.ec442(e42) # right 2 l4 (concat later)\n #print(\"syn4\", syn4.shape)\n del e41, e42\n #print(\"L5\")\n e51 = self.bilinear(syn4, 64, 128, size=self.sizes[5]) # l4-l5\n #print(\"e51\", e51.shape)\n # l5\n e52 = self.ec551(e51) # right 1\n #print(\"e52\", e52.shape)\n syn5 = self.ec552(e52) # right 2\n #print(\"syn5\", syn5.shape)\n del e51, e52\n #print(\"L6\")\n e61 = self.bilinear(syn5, 128, 128, size=self.sizes[6]) # l5-l6\n #print(\"e61\", e61.shape)\n \n # l6\n e62 = self.ec661(e61) # right 1\n #print(\"e62\", e62.shape)\n syn6 = self.ec662(e62) # right 2\n #print(\"syn6\", syn6.shape)\n del e61, e62\n #print(\"L7\")\n e71 = self.bilinear(syn6, 128, 256, size=self.sizes[7]) #l6-7\n #print(\"e71\", e71.shape)\n \n # l7\n e72 = self.ec771(e71) # right 1 (green)\n #print(\"e72\", e72.shape)\n syn7 = self.ec772(e72) # right 2 (turq)\n #print(\"syn7\", syn7.shape)\n del e71, e72\n\n #print(\"L8\")\n\n #e_bottom_left = self.bilinear(syn7, 256, 4092, size=self.sizes[8]) # l7-l8\n e_bottom_left = self.bilinear(syn7, 256, 256, size=self.sizes[8]) # l7-l8\n #print(\"e_b_l\", e_bottom_left.shape)\n\n # l8 - the very bottom most encoded\n e_bottom_left = e_bottom_left.view(e_bottom_left.size(0), -1)\n batch_size = e_bottom_left.size()[0]\n e_bottom_right = self.ec88(e_bottom_left)\n # TODO - change the view so that 1st arg is batch size again\n e_bottom_right = e_bottom_right.view(batch_size, e_bottom_right.size(1), 1,1,1)\n #print(\"e_b_r\", e_bottom_right.shape)\n\n #print(\"SIZE BEFORE DEL\", torch.cuda.max_memory_allocated())\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n #print(\"SIZE AFTER DEL\", torch.cuda.max_memory_allocated())\n\n ## DECODE ##\n #print(\"TO CONCAT:\")\n #print(\"Shape1\", self.bilinear(e_bottom_right, 4096, 256, size=self.sizes2[7]).shape)\n #print(\"Shape1\", self.bilinear(e_bottom_right, 256, 256, size=self.sizes2[7]).shape)\n #print(\"syn7 \", syn7.shape)\n # QUESTION - check this is a simple cat - says \"copy and stack\"\n #d71 = torch.cat((self.bilinear(e_bottom_right, 4096, 256, size=self.sizes2[7]), syn7), dim=1) # concat on level 7\n d71 = torch.cat((self.bilinear(e_bottom_right, 256, 256, size=self.sizes2[7]), syn7), dim=1) # concat on level 7\n #print(\"d71 (post cat)\", d71.shape)\n del e_bottom_left, e_bottom_right\n d72 = self.dc77(d71) # move right on level 7 (decode)\n #print(\"d72 (decoded)\", d72.shape)\n del d71, syn7\n\n # TODO - finish\n d61 = torch.cat((self.bilinear(d72, 256, 128, size=self.sizes2[6]), syn6), dim=1)\n del d72, syn6\n d62 = self.dc66(d61)\n\n d51 = torch.cat((self.bilinear(d62, 128, 128, size=self.sizes2[5]), syn5), dim=1)\n del d61, d62, syn5\n d52 = self.dc55(d51)\n\n d41 = torch.cat((self.bilinear(d52, 128, 64, size=self.sizes2[4]), syn4), dim=1)\n del d51, d52, syn4\n d42 = self.dc44(d41)\n\n d31 = torch.cat((self.bilinear(d42, 64, 32, size=self.sizes2[3]), syn3), dim=1)\n del d41, d42, syn3\n d32 = self.dc33(d31)\n\n d21 = torch.cat((self.bilinear(d32, 32, 32, size=self.sizes2[2]), syn2), dim=1)\n del d31, d32, syn2\n d22 = self.dc22(d21)\n\n d11 = 
torch.cat((self.bilinear(d22, 32, 32, size=self.sizes2[1]), syn1), dim=1)\n del d21, d22, syn1\n d12 = self.dc11(d11)\n return d12\n \"\"\"\n del d11\n # QUESTION\n # is this right or is there only 1 rightward step at top layer\n d0 = self.dc10(d12)\n return d0\n \"\"\"", "def forward(self, Ca, mask, residue_idx, chain_labels):\n if self.augment_eps > 0:\n Ca = Ca + self.augment_eps * torch.randn_like(Ca)\n\n D_neighbors, E_idx, mask_neighbors = self._dist(Ca, mask)\n\n Ca_0 = torch.zeros(Ca.shape, device=Ca.device)\n Ca_2 = torch.zeros(Ca.shape, device=Ca.device)\n Ca_0[:,1:,:] = Ca[:,:-1,:]\n Ca_1 = Ca\n Ca_2[:,:-1,:] = Ca[:,1:,:]\n\n V, O_features = self._orientations_coarse(Ca, E_idx)\n \n RBF_all = []\n RBF_all.append(self._rbf(D_neighbors)) #Ca_1-Ca_1\n RBF_all.append(self._get_rbf(Ca_0, Ca_0, E_idx)) \n RBF_all.append(self._get_rbf(Ca_2, Ca_2, E_idx))\n\n RBF_all.append(self._get_rbf(Ca_0, Ca_1, E_idx))\n RBF_all.append(self._get_rbf(Ca_0, Ca_2, E_idx))\n\n RBF_all.append(self._get_rbf(Ca_1, Ca_0, E_idx))\n RBF_all.append(self._get_rbf(Ca_1, Ca_2, E_idx))\n\n RBF_all.append(self._get_rbf(Ca_2, Ca_0, E_idx))\n RBF_all.append(self._get_rbf(Ca_2, Ca_1, E_idx))\n\n\n RBF_all = torch.cat(tuple(RBF_all), dim=-1)\n\n\n offset = residue_idx[:,:,None]-residue_idx[:,None,:]\n offset = gather_edges(offset[:,:,:,None], E_idx)[:,:,:,0] #[B, L, K]\n\n d_chains = ((chain_labels[:, :, None] - chain_labels[:,None,:])==0).long()\n E_chains = gather_edges(d_chains[:,:,:,None], E_idx)[:,:,:,0]\n E_positional = self.embeddings(offset.long(), E_chains)\n E = torch.cat((E_positional, RBF_all, O_features), -1)\n \n\n E = self.edge_embedding(E)\n E = self.norm_edges(E)\n \n return E, E_idx", "def third_step(self, plan, dlvl_sag_img, rphase_sag_img, root_cor_sequence, cor_sequence, sag_sequence):\n \"\"\" Represents the column in the register matrix that represents the coronal sequence used.\n It's used to populate the registration matrix correctly \"\"\"\n column = self.cor_sequences.index(cor_sequence)\n # print('(Step 3) Column: {}\\n'.format(column))\n\n # Respiratory patterns linked to the sequence containing the root image\n lpatterns = [x for x in self.dataset if int(x.split('-')[1]) == cor_sequence]\n # print('(Step 3) Patterns coronal: {} ({})\\n'.format(lpatterns, len(lpatterns)))\n\n # Respiratory patterns linked to the sequence containing the root sagittal image\n pattern = [p for p in lpatterns if int(p.split('-')[3]) == sag_sequence][0]\n # print(\"(Step 3) Pattern: {}\\n\".format(pattern))\n # print(\"(Step 3) DL: {}\\n\".format(dlvl_sag_img))\n # print(\"(Step 3) DL[pos]: {}\\n\".format(dlvl_sag_img[column]))\n\n \"\"\" Get the diaphragmatic level of each image of the analyzed coronal sequence (parallel\n to the root coronal sequence) that crosses the sagittal image registered in the second\n step \"\"\"\n pts_pattern = self.pattern_coronal('{}.png'.format(pattern))\n diaph_lvl = [max(x) for x in self.diaphragmatic_level_coronal(pts_pattern)]\n resp_phase = self.respiratory_phase_coronal(self.diaphragmatic_level_coronal(pts_pattern))\n # print(\"(Step 3) DL sag: {}\\n\".format(dlvl_sag_img))\n # print(\"(Step 3) DL cor: {}\\n\".format(diaph_lvl))\n # print(\"(Step 3) Respiratory phase: {} ({})\\n\".format(resp_phase, len(resp_phase)))\n\n \"\"\" Check register condition:\n 1) If there is same diaphragmatic level \"\"\"\n index_imgs_registered = list() # Store index of the coronal registered images\n for index, i in enumerate(diaph_lvl):\n if i == dlvl_sag_img[column]:\n 
index_imgs_registered.append(index)\n # print(\"(Step 3) Index of registered images: {} ({})\\n\".format(index_imgs_registered, len(index_imgs_registered)))\n\n \"\"\" Check register condition:\n 2) If the instants are in the same respiratory phase \"\"\"\n for index, i in enumerate(resp_phase):\n if index in index_imgs_registered:\n if resp_phase[index] != rphase_sag_img[column]:\n index_imgs_registered.remove(index)\n # print(\"(Step 3) Index of registered images: {} ({})\\n\".format(index_imgs_registered, len(index_imgs_registered)))\n # c = raw_input(\"?\")\n\n # If there is no registered image\n if len(index_imgs_registered) == 0:\n # return -1, -1, -1\n\n index_imgs_registered =\\\n self.third_step_second_attempt(\n diaph_lvl=diaph_lvl,\n dlvl_sag_img=dlvl_sag_img,\n rphase_sag_img=rphase_sag_img,\n cor_sequence=cor_sequence,\n option=True)\n\n if len(index_imgs_registered) == 0:\n return -1, -1, -1\n\n # Get first sagittal image that was registered with root image\n if len(index_imgs_registered) > 0:\n imgnum = index_imgs_registered[0]\n # print(\"(Step 3) Imagem: {}\\n\".format(imgnum))\n # print(\"(Step 3) DL[pos]: {}\\n\".format(diaph_lvl[imgnum]))\n\n # Fills the matrices\n for i in range(self.matrows):\n pts_pattern = self.pattern_coronal('{}.png'.format(lpatterns[i]))\n diaph_lvl = [max(x) for x in self.diaphragmatic_level_coronal(pts_pattern)]\n # resp_phase =\\\n # self.respiratory_phase_sagittal(self.diaphragmatic_level_sagittal(pts_pattern))\n\n \"\"\" By analyzing a green points:\n - Red points: Appears if a temporal register is found \"\"\"\n if self.matRegistration[i, column] == 0.0 and len(index_imgs_registered) > 0:\n self.matDL[i, column] = diaph_lvl[imgnum]\n self.matRegistration[i, column] = self.yellow\n self.matRP[i, column] = resp_phase[i]\n\n # elif self.matRegistration[i, column] == 2.0 and len(index_imgs_registered) > 0:\n # self.matRegistration[i, column] = self.red\n\n # print(\"(Step 3) Diaphragmatic level matrix:\\n{}\\n\".format(self.matDL))\n # print(\"(Step 3) Registration matrix:\\n{}\\n\".format(self.matRegistration))\n # print(\"(Step 3 Respiratory phase:\\n{}\\n\".format(self.matRP))\n\n # print(\"(Step 3) Diaphragmatic level matrix:\\n{}\\n\".format(self.matDL))\n # print(\"(Step 3) Registration matrix:\\n{}\\n\".format(self.matRegistration))\n # print(\"(Step 3) Respiratory phase:\\n{}\\n\".format(self.matRP))\n\n imgnum = imgnum + 1\n dlvl = [self.matDL[i, column] for i in range(len(self.sag_sequences))]\n rphase = [int(self.matRP[i, column]) for i in range(len(self.sag_sequences))]\n # print(\"(Step 3) Registered mage: {}\\n\".format(imgnum))\n # print(\"(Step 3) DL: {}\\n\".format(dlvl))\n\n return imgnum, dlvl, rphase", "def to_revolute_chain(self):\n T_zero = {\"p0\": SE3.identity()}\n ang_lims_map = {}\n old_to_new_names = {\n \"p0\": \"p0\"\n } # Returned for user of the method (to map old joint names to new ones)\n ub, lb = spherical_angle_bounds_to_revolute(self.ub, self.lb)\n count = 1\n joint_prev = \"p0\"\n for (\n joint\n ) in self.d: # Assumes the dictionary is in chain order (perhaps enforce?)\n new_node1 = \"p\" + str(count)\n count += 1\n # ub[new_node1] = self.ub[joint]\n # lb[new_node1] = self.lb[joint]\n ang_lims_map[joint] = new_node1\n\n new_node2 = \"p\" + str(count)\n count += 1\n old_to_new_names[joint] = new_node2\n\n Ry = SE3(SO3(roty(np.pi / 2)), np.zeros(3))\n T_zero[new_node1] = T_zero[joint_prev].dot(Ry)\n d = self.d[joint]\n Ry_back = SE3(SO3(roty(-np.pi / 2)), np.zeros(3))\n T_zero[new_node2] = 
T_zero[new_node1].dot(Ry_back).dot(trans_axis(d, \"z\"))\n\n joint_prev = new_node2\n\n # for key in T_zero:\n # if key not in ub.keys() and key is not 'p0':\n # ub[key] = np.pi\n # lb[key] = -np.pi\n\n params = {\"T_zero\": T_zero, \"ub\": ub, \"lb\": lb}\n return RobotRevolute(params), old_to_new_names, ang_lims_map", "def branch_precursor(state, time, d):\n assert d[\"alpha_IL2\"] < d[\"alpha1\"] and d[\"alpha_IL2\"] < d[\"alpha2\"]\n \n th0 = state[0]\n \n th1 = state[1:(d[\"alpha1\"]+d[\"alpha1_p\"]+1)]\n th2 = state[(d[\"alpha1\"]+d[\"alpha1_p\"]+1):]\n #print(len(state), len(th1))\n ### get all cytokine secreting cells \n th1_all = np.sum(th1[-d[\"alpha1_p\"]:])\n th2_all = np.sum(th2[-d[\"alpha2_p\"]:])\n \n t_eff = th1_all+th2_all\n t_il2 = np.sum(th1[:d[\"alpha_IL2\"]]) + np.sum(th2[:d[\"alpha_IL2\"]])\n\n ### calculate cytokine concentrations\n cyto_1 = d[\"beta_cyto_1\"]*th1_all + d[\"ifn_ext\"]\n cyto_2 = d[\"beta_cyto_2\"]*th2_all + d[\"il21_ext\"]\n \n conc_il2 = d[\"rate_il2\"]*t_il2/(d[\"K_il2\"]+t_eff)\n\n # compute feedbacks\n fb1 = d[\"fb_rate1\"]*cyto_1**3/(cyto_1**3+d[\"K_1\"]**3)\n fb2 = d[\"fb_rate2\"]*cyto_2**3/(cyto_2**3+d[\"K_2\"]**3)\n ### update differantiation rate\n beta1 = d[\"beta1\"]*(1+fb1)\n beta2 = d[\"beta2\"]*(1+fb2) \n \n ### calculate probability, note that these are adjusted to beta1 beta2 so that\n # they are not necessarily \\in (0,1)\n p1, p2 = get_prob(d, beta1, beta2, cyto_1, cyto_2)\n \n #print(beta1*p1_adj/(beta1*p1_adj+beta2))\n beta1_p = d[\"beta1_p\"]\n beta2_p = d[\"beta2_p\"]\n rate_death = d[\"d_eff\"] \n \n # check for homeostasis regulation\n if d[\"crit\"] == False:\n update_t0(d, time, conc_il2, t_eff)\n elif d[\"death_mode\"] == False:\n assert d[\"crit\"] == True \n beta1_p = beta1_p*np.exp(-d[\"decay_p\"]*(time-d[\"t0\"]))\n beta2_p = beta2_p*np.exp(-d[\"decay_p\"]*(time-d[\"t0\"]))\n\n else:\n rate_death = rate_death*np.exp(time-d[\"t0\"])\n\n # this is the actual differentiation where odes are computed \n dt_th1 = diff_precursor(th1, th0, d[\"alpha1\"], beta1, beta1_p, p1, rate_death, d)\n dt_th2 = diff_precursor(th2, th0, d[\"alpha2\"], beta2, beta2_p, p2, rate_death, d)\n dt_th0 = -(beta1*p1+beta2)*th0 \n dt_state = np.concatenate(([dt_th0], dt_th1, dt_th2))\n\n return dt_state", "def Motion_estimate_reverse_1frame(ref0_frame,ref1_frame,P_frame,block_size):\n \n nb_blocks = width//block_size*height//block_size\n \n vect_field = np.array(P_frame[:nb_blocks*3])\n vect_field = vect_field.reshape((height//block_size,width//block_size,3))\n \n frame_error = DCT_inverse(np.array(P_frame[nb_blocks*3:]),offset=0)\n tar_Y = frame_error[ :sep1].reshape(height,width)\n tar_U = frame_error[sep1:sep2].reshape(height//2,width//2)\n tar_V = frame_error[sep2: ].reshape(height//2,width//2)\n \n ref_frame = [ref0_frame,ref1_frame]\n \n for X in range(0,height//block_size):\n for Y in range(0,width//block_size):\n xa, xz = X*block_size,(X+1)*block_size\n ya, yz = Y*block_size,(Y+1)*block_size\n \n ref,vx,vy = vect_field[X,Y,:]\n \n pxa, pxz = xa+vx,xz+vx\n pya, pyz = ya+vy,yz+vy\n \n patch_Y = ref_Y[ref][pxa:pxz,pya:pyz]\n patch_U = ref_U[ref][pxa//2:pxz//2,pya//2:pyz//2]\n patch_V = ref_V[ref][pxa//2:pxz//2,pya//2:pyz//2]\n \n tar_Y[xa:xz,ya:yz] += patch_Y\n tar_U[xa//2:xz//2,ya//2:yz//2] += patch_U\n tar_V[xa//2:xz//2,ya//2:yz//2] += patch_V\n\n target_frame = np.concatenate((tar_Y.flatten(),\n tar_U.flatten(),\n tar_V.flatten()))\n return target_frame", "def forward(self, x):\n # Encoder1 --block1\n encode_block1 = self.conv_encoder1(x)\n 
if self.residus[0] == 1:\n encode_block1 += self.residual_shortcut1(x)\n encode_pool1 = self.max_pool_encoder1(encode_block1)\n\n # Encoder2 --block2\n encode_block2 = self.conv_encoder2(encode_pool1)\n if self.residus[1] == 1:\n encode_block2 += self.residual_shortcut2(encode_pool1)\n encode_pool2 = self.max_pool_encoder2(encode_block2)\n \n # Encoder3 --block3\n encode_block3 = self.conv_encoder3(encode_pool2)\n if self.residus[2] == 1:\n encode_block3 += self.residual_shortcut3(encode_pool2) ##\n encode_pool3 = self.max_pool_encoder3(encode_block3)\n \n # Encoder4 --block4\n encode_block4 = self.conv_encoder4(encode_pool3) \n if self.residus[3] == 1:\n encode_block4 += self.residual_shortcut4(encode_pool3) ##\n encode_pool4 = self.max_pool_encoder4(encode_block4)\n\n # Transitional block \n encode_block_trans_1 = self.conv_encoder_trans_1(encode_pool4)\n encode_block_trans_1 = self.conv_encoder_trans_2(encode_block_trans_1)\n encode_block_trans_1 = self.conv_encoder_trans_3(encode_block_trans_1)\n \n encode_block_trans_1 = torch.cat((encode_pool4, encode_block_trans_1), 1) # Concatenation\n \n encode_block_trans_2 = self.conv_encoder_trans_4(encode_block_trans_1)\n encode_block_trans_2 = self.conv_encoder_trans_5(encode_block_trans_2)\n encode_block_trans_2 = self.conv_encoder_trans_6(encode_block_trans_2)\n\n middle_block = torch.cat((encode_block_trans_1, encode_block_trans_2), 1) # Concatenation\n \n convTranspose_transitional = self.convTranspose_transitional(middle_block) \n # Decoder4 --block5\n decode_block4 = torch.cat((convTranspose_transitional, encode_block4), 1) \n if self.residus[4] == 1:\n decode_block4 += self.residual_shortcut_encoder_decoder4(encode_block4)\n\n #--block6\n cat_layer3 = self.conv_decoder4(decode_block4) \n if self.residus[5] == 1:\n cat_layer3 += self.residual_shortcut_decoder4(decode_block4)\n convTranspose_decoder4 = self.convTranspose_decoder4(cat_layer3)\n \n \n # Decoder3 --block7\n decode_block3 = torch.cat((convTranspose_decoder4, encode_block3), 1)\n if self.residus[6] == 1:\n decode_block3 += self.residual_shortcut_encoder_decoder3(encode_block3)\n \n #--block8\n cat_layer2 = self.conv_decoder3(decode_block3) \n if self.residus[7] == 1:\n cat_layer2 += self.residual_shortcut_decoder3(decode_block3)\n convTranspose_decoder3 = self.convTranspose_decoder3(cat_layer2)\n \n # Decoder2 --block9\n decode_block2 = torch.cat((convTranspose_decoder3, encode_block2), 1) \n if self.residus[8] == 1:\n decode_block2 += self.residual_shortcut_encoder_decoder2(encode_block2)\n \n #--block10\n cat_layer1 = self.conv_decoder2(decode_block2)\n if self.residus[9] == 1:\n cat_layer1 += self.residual_shortcut_decoder2(decode_block2)\n convTranspose_decoder2 = self.convTranspose_decoder2(cat_layer1)\n \n # Decoder1 --block11\n decode_block1 = torch.cat((convTranspose_decoder2, encode_block1), 1) \n if self.residus[10] == 1:\n decode_block1 += self.residual_shortcut_encoder_decoder1(encode_block1)\n \n #--block12\n final_layer = self.final_layer(decode_block1)\n if self.residus[11] == 1:\n final_layer += self.residual_shortcut_final_layer(decode_block1)\n \n \n return final_layer", "def main():\n \n fname = sys.argv[1]\n fin = open(fname)\n a123 = []\n batms = []\n##### Read in old basis and vectors\n for line in fin:\n if line[0] == \"#\": continue\n \n line = line.split()\n line = [ float(x.strip()) for x in line[:3] ]\n \n if len(a123) == 3: batms.append(line); continue\n a123.append(line)\n \n fname = sys.argv[2]\n fin = open(fname)\n b123 = []\n for line in fin:\n 
if line[0] == \"#\": continue\n \n line = line.split()\n line = [ float(x.strip()) for x in line[:3] ]\n \n b123.append(line)\n if len(b123) == 3: break\n \n print \"... lattice vectors \\n old new \"\n for i in range(3):\n print (\" %1.4f | %1.4f | %1.4f %1.4f | %1.4f | %1.4f \" % \n (a123[0][i], a123[1][i], a123[2][i], b123[0][i], b123[1][i], b123[2][i]) )\n \n print \"... basis atoms = \"\n for i in range(len(batms)):\n print \" %1.4f %1.4f %1.4f\" % (batms[i][0], batms[i][1], batms[i][2])\n \n\n##### Read in new basis that you want to switch to\n##### Take any point q_A = (q1,q2,q3) then q_E = q1*a1_E + q2*a2_E + q3*a3_E = (x, y ,z)\n##### Hence, we can say that q_B = (p1,p2,p3) = q1*a1_B + q2*a2_B + q3*a3_B\n\n##### Writing in matrix form we can say that [ a1_E | a2_E | a3_E ]*q_A = q_E\n##### Apply the same logic to vector ai we see [b1_E | b2_E | b3_E ]*ai_B = ai_E\n##### Hence, --> ////ai_B = cbm*ae_E|\\\\\\\\\n \n a123 = [np.array(x) for x in a123] #old basis\n b123 = [np.array(x) for x in b123] #new basis\n \n B = np.transpose(b123)\n \n invB = np.linalg.inv(B)\n a123_B = [np.dot(x,invB) for x in a123]\n A_B = np.transpose(a123_B) #representation of old vectors in new space (colum wise)\n print \" ... representation of old vectors in the new basis = \"\n for i in range(3):\n print \" %1.7f | %1.7f | %1.7f\" % (A_B[0][i], A_B[1][i], A_B[2][i])\n \n##### Build 5 unit cells all around\n comb = [] #array containing unit cell coordinates\n for i1 in range(-2,2):\n for i2 in range(-2,2):\n for i3 in range(-2,2):\n comb.append([i1,i2,i3])\n \n nuc = len(comb)\n b2atms = [] #new basis atoms \n b2map = [] #new basis map\n\n for uc in comb:\n for i in range(len(batms)):\n tmp = [ uc[0]+batms[i][0], uc[1]+batms[i][1], uc[2]+batms[i][2] ] # add all basis atoms in each unit cell\n prcs = 4 # significat figures for rounding\n tmp = np.array(tmp)\n tmp = np.dot(A_B,tmp) # matrix multiplication\n tmp = np.round(tmp,prcs) \n eps = 0 #needed for round off error\n if -eps<=tmp[0]<1+eps and -eps<=tmp[1]<1+eps and -eps<=tmp[2]<1+eps: # if in first unit cell\n b2atms.append(tmp.tolist())\n b2map.append( [uc[0],uc[1],uc[2],i] ) \n \n print \"--> New basis has \" + str(len(b2atms)) + \" atoms in fractional coordinates:\"\n for i in range(len(b2atms)):\n print ( \" %1.4f %1.4f %1.4f <-- %1.0f %1.0f %1.0f|%1.0f\" % \n (b2atms[i][0], b2atms[i][1], b2atms[i][2], b2map[i][0], b2map[i][1], b2map[i][2], b2map[i][3]) )", "def skeletonize3D(data, steps = None, verbose = True):\n \n if verbose: \n print('#############################################################'); \n print('Skeletonization RC6 [convolution]');\n tstart = time.time();\n \n # detect points\n points = np.array(np.nonzero(data)).T;\n if verbose:\n print('Foreground points: %d' % points.shape[0]); \n \n #if info is not None:\n # #birth = np.zeros(data.shape, dtype = 'uint16');\n # death = np.zeros(data.shape, dtype = 'uint16');\n \n K = np.zeros(len(points.shape[0]), dtype = bool);\n \n # iterate\n if steps is None:\n steps = -1;\n\n step = 1;\n removed = 0;\n while True:\n if verbose:\n print('#############################################################');\n print('Iteration %d' % step);\n titer = time.time();\n \n # 1d istmusses on remaining points\n notK = np.logical_not(K);\n constrained[notK] = isthmus1D[convolve_3d(data, base, points[notK])];\n \n \n if verbose:\n print('-------------------------------------------------------------');\n print('Constrained %d' % constrained.sum());\n \n #if info is not None:\n # b = birth[borderpoints[:,0], 
borderpoints[:,1], borderpoints[:,2]];\n # bids = b == 0;\n # birth[borderpoints[bids,0], borderpoints[bids,1], borderpoints[bids,2]] = step;\n \n # sub iterations over 6 directions\n remiter = 0;\n for i in range(6):\n if verbose:\n print('-------------------------------------------------------------');\n print('Sub-Iteration %d' % i);\n tsubiter = time.time();\n \n remborder = delete[convolve3D(data, rotations[i], free_points)];\n constrained[not_constrained] = rempoints = borderpoints[remborder];\n if verbose:\n print('Matched points: %d' % len(rempoints));\n \n\n data[rempoints[:,0], rempoints[:,1], rempoints[:,2]] = 0;\n keep[borderids[remborder]] = False;\n rem = len(rempoints);\n remiter += rem;\n removed += rem;\n if verbose:\n print('Deleted points: %d' % (rem));\n print('Sub-Iteration %d time: %0.2f s' % (i, time.time() - tsubiter));\n\n #update foreground\n points = points[keep];\n if verbose:\n print('Foreground points: %d' % points.shape[0]); \n \n #death times\n #if info is not None:\n # #remo = np.logical_not(keep);\n # death[points[:,0], points[:,1], points[:,2]] = step;\n \n if verbose:\n print('-------------------------------------------------------------');\n print('Iteration time: %0.2f s' % (time.time() - titer));\n \n step += 1;\n if steps >= 0 and step >= steps:\n break\n if remiter == 0:\n break\n \n if verbose:\n print('#############################################################');\n print('Skeletonization time %0.2f s' % (time.time()-tstart));\n print('Total removed: %d' % (removed));\n print('Total remaining: %d' % (len(points)));\n \n #if info is not None:\n # #return data, points, birth, death\n # return data, points, death\n #else:\n return data, points;", "def __stage2(self, img, total_boxes, stage_status: StageStatus):\r\n\r\n num_boxes = total_boxes.shape[0]\r\n if num_boxes == 0:\r\n return total_boxes, stage_status\r\n\r\n # second stage\r\n tempimg = np.zeros(shape=(24, 24, 3, num_boxes))\r\n\r\n for k in range(0, num_boxes):\r\n tmp = np.zeros((int(stage_status.tmph[k]), int(stage_status.tmpw[k]), 3))\r\n\r\n tmp[stage_status.dy[k] - 1:stage_status.edy[k], stage_status.dx[k] - 1:stage_status.edx[k], :] = \\\r\n img[stage_status.y[k] - 1:stage_status.ey[k], stage_status.x[k] - 1:stage_status.ex[k], :]\r\n\r\n if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:\r\n tempimg[:, :, :, k] = cv2.resize(tmp, (24, 24), interpolation=cv2.INTER_AREA)\r\n\r\n else:\r\n return np.empty(shape=(0,)), stage_status\r\n\r\n tempimg = (tempimg - 127.5) * 0.0078125\r\n tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))\r\n\r\n out = self._rnet.run(tempimg1)\r\n\r\n out0 = np.transpose(out[0])\r\n out1 = np.transpose(out[1])\r\n\r\n score = out1[1, :]\r\n\r\n ipass = np.where(score > self._steps_threshold[1])\r\n\r\n total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])\r\n\r\n mv = out0[:, ipass[0]]\r\n\r\n if total_boxes.shape[0] > 0:\r\n pick = self.__nms(total_boxes, 0.7, 'Union')\r\n total_boxes = total_boxes[pick, :]\r\n total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv[:, pick]))\r\n total_boxes = self.__rerec(total_boxes.copy())\r\n\r\n return total_boxes, stage_status", "def run():\n step = 0\n o2r = 4 #orange to red delay time\n r2g = 2 #red to green delay time\n A_4235 = 0\n B_4235 = 1\n C_4235 = 2\n AB1_4235 = 3\n AB2_4235 = 4\n AC1_4235 = 5\n AC2_4235 = 6\n BA1_4235 = 7\n BA2_4235 = 8\n BC1_4235 = 9\n BC2_4235 = 10\n CA1_4235 = 11\n CA2_4235 = 12\n CB1_4235 = 13\n 
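    # Phase-index naming used throughout run(): a single letter (A_4235, B_4235, ...) is a
    # main stage of the signal program, and the two-letter pairs XY1_/XY2_ are the
    # intermediate phases inserted when switching from stage X to stage Y; in the blocks
    # below XY1 is always held for o2r (the orange-to-red delay) and XY2 for r2g (the
    # red-to-green delay). The *_4219, *_4220 and *_4221 constants follow the same scheme
    # for the other junctions.
    #
    # The hard-coded `if step == t:` schedule that follows could equally be driven by a
    # per-junction lookup table; a rough sketch only, with plan_4235 and TLS_4235 as
    # illustrative names and the times copied from the first few blocks below:
    #
    #   TLS_4235 = "cluster_1707799581_314056954_5931861577"
    #   plan_4235 = {0: (B_4235, 6), 6: (BA1_4235, o2r), 10: (BA2_4235, r2g), 12: (A_4235, 75)}
    #   if step in plan_4235:
    #       phase, duration = plan_4235[step]
    #       traci.trafficlight.setPhase(TLS_4235, phase)
    #       traci.trafficlight.setPhaseDuration(TLS_4235, duration)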
CB2_4235 = 14\n A_4219 = 0\n B_4219 = 1\n C_4219 = 2\n D_4219 = 3\n E_4219 = 4\n F_4219 = 5\n G_4219 = 6\n AB1_4219 = 7\n AB2_4219 = 8\n AC1_4219 = 9\n AC2_4219 = 10\n AD1_4219 = 11\n AD2_4219 = 12\n AE1_4219 = 13\n AE2_4219 = 14\n AF1_4219 = 16\n AF2_4219 = 17\n AG1_4219 = 18\n AG2_4219 = 19\n BA1_4219 = 20\n BA2_4219 = 21\n BC1_4219 = 22\n BC2_4219 = 23\n BD1_4219 = 24\n BD2_4219 = 25\n BE1_4219 = 26\n BE2_4219 = 27\n BF1_4219 = 28\n BF2_4219 = 29\n BG1_4219 = 30\n BG2_4219 = 31\n CA1_4219 = 32\n CA2_4219 = 33\n CB1_4219 = 34\n CB2_4219 = 35\n CD1_4219 = 36\n CD2_4219 = 37\n CE1_4219 = 38\n CE2_4219 = 39\n CF1_4219 = 40\n CF2_4219 = 41\n CG1_4219 = 42\n CG2_4219 = 43\n DA1_4219 = 44\n DA2_4219 = 45\n DB1_4219 = 46\n DB2_4219 = 47\n DC1_4219 = 48\n DC2_4219 = 49\n DE1_4219 = 50\n DE2_4219 = 51\n DF1_4219 = 52\n DF2_4219 = 53\n DG1_4219 = 54\n DG2_4219 = 55\n EA1_4219 = 56\n EA2_4219 = 57\n EB1_4219 = 58\n EB2_4219 = 59\n EC1_4219 = 60\n EC2_4219 = 61\n ED1_4219 = 62\n ED2_4219 = 63\n EF1_4219 = 64\n EF2_4219 = 65\n EG1_4219 = 66\n EG2_4219 = 67\n FA1_4219 = 68\n FA2_4219 = 69\n FB1_4219 = 70\n FB2_4219 = 71\n FC1_4219 = 72\n FC2_4219 = 73\n FD1_4219 = 74\n FD2_4219 = 75\n FE1_4219 = 76\n FE2_4219 = 77\n FG1_4219 = 78\n FG2_4219 = 79\n GA1_4219 = 80\n GA2_4219 = 81\n GB1_4219 = 82\n GB2_4219 = 83\n GC1_4219 = 84\n GC2_4219 = 85\n GD1_4219 = 86\n GD2_4219 = 87\n GE1_4219 = 88\n GE2_4219 = 89\n GF1_4219 = 90\n GF2_4219 = 91\n A_4220 = 0\n B_4220 = 1\n C_4220 = 2\n D_4220 = 3\n E_4220 = 4\n AB1_4220 = 5\n AB2_4220 = 6\n AC1_4220 = 7\n AC2_4220 = 8\n AD1_4220 = 9\n AD2_4220 = 10\n AE1_4220 = 11\n AE2_4220 = 12\n BA1_4220 = 13\n BA2_4220 = 14\n BC1_4220 = 15\n BC2_4220 = 16\n BD1_4220 = 17\n BD2_4220 = 18\n BE1_4220 = 19\n BE2_4220 = 20\n CA1_4220 = 21\n CA2_4220 = 22\n CB1_4220 = 23\n CB2_4220 = 24\n CD1_4220 = 25\n CD2_4220 = 26\n CE1_4220 = 27\n CE2_4220 = 28\n DA1_4220 = 29\n DA2_4220 = 30\n DB1_4220 = 31\n DB2_4220 = 32\n DC1_4220 = 33\n DC2_4220 = 34\n DE1_4220 = 35\n DE2_4220 = 36\n EA1_4220 = 37\n EA2_4220 = 38\n EB1_4220 = 39\n EB2_4220 = 40\n EC1_4220 = 41\n EC2_4220 = 42\n ED1_4220 = 43\n ED2_4220 = 44\n A_4221 = 0\n B_4221 = 1\n C_4221 = 2\n D_4221 = 3\n E_4221 = 4\n F_4221 = 5\n AB1_4221 = 6\n AB2_4221 = 7\n AC1_4221 = 8\n AC2_4221 = 9\n AD1_4221 = 10\n AD2_4221 = 11\n AE1_4221 = 12\n AE2_4221 = 13\n AF1_4221 = 14\n AF2_4221 = 15\n BA1_4221 = 16\n BA2_4221 = 17\n BC1_4221 = 18\n BC2_4221 = 19\n BD1_4221 = 20\n BD2_4221 = 21\n BE1_4221 = 22\n BE2_4221 = 23\n BF1_4221 = 24\n BF2_4221 = 25\n CA1_4221 = 26\n CA2_4221 = 27\n CB1_4221 = 28\n CB2_4221 = 29\n CD1_4221 = 30\n CD2_4221 = 31\n CE1_4221 = 32\n CE2_4221 = 33\n CF1_4221 = 34\n CF2_4221 = 35\n DA1_4221 = 36\n DA2_4221 = 37\n DB1_4221 = 38\n DB2_4221 = 39\n DC1_4221 = 40\n DC2_4221 = 41\n DE1_4221 = 42\n DE2_4221 = 43\n DF1_4221 = 44\n DF2_4221 = 45\n EA1_4221 = 46\n EA2_4221 = 47\n EB1_4221 = 48\n EB2_4221 = 49\n EC1_4221 = 50\n EC2_4221 = 51\n ED1_4221 = 52\n ED2_4221 = 53\n EF1_4221 = 54\n EF2_4221 = 55\n FA1_4221 = 56\n FA2_4221 = 57\n FB1_4221 = 58\n FB2_4221 = 59\n FC1_4221 = 60\n FC2_4221 = 61\n FD1_4221 = 62\n FD2_4221 = 63\n FE1_4221 = 64\n FE2_4221 = 65\n \n #while traci.simulation.getMinExpectedNumber() > 0:\n while step < 600:\n traci.simulationStep()\n if step == 0:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",6)\n if step == 6:\n 
traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 10:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 12:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", A_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",75)\n if step == 87:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", AB1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 91:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", AB2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 93:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",15)\n if step == 108:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BC1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 112:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BC2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 114:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", C_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",12)\n if step == 126:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", CB1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 130:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", CB2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 132:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",32)\n if step == 164:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 168:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 170:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", A_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",50)\n if step == 220:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", AB1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 224:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", AB2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 226:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",15)\n if step == 241:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BC1_4235)\n 
traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 245:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BC2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 247:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", C_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",14)\n if step == 261:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", CB1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 265:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", CB2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 267:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",13)\n if step == 280:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 284:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 286:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", A_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",74)\n if step == 360:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", AB1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 364:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", AB2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 366:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",15)\n if step == 381:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BC1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 385:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BC2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 387:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", C_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",10)\n if step == 397:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", CB1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 401:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", CB2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 403:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",16)\n if step == 419:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 423:\n 
traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 425:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", A_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",74)\n if step == 499:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", AB1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 503:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", AB2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 505:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",15)\n if step == 520:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BC1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 524:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BC2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 526:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", C_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",8)\n if step == 534:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", CB1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 538:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", CB2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 540:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",16)\n if step == 556:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 560:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 562:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", A_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",38)\n if step == 0:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", A_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 6)\n if step == 6:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 10:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 12:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", D_4219)\n 
traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 20)\n if step == 32:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 36:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 38:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", E_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 5)\n if step == 43:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 47:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 49:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", F_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 31)\n if step == 80:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", FG1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 84:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", FG2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 86:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", G_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 12)\n if step == 98:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", GA1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 102:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", GA2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 104:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", A_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 29)\n if step == 133:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step 
== 137:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 139:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", D_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 20)\n if step == 159:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 163:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 165:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", E_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 5)\n if step == 170:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 174:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 176:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", F_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 38)\n if step == 214:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", FG1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 218:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", FG2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 220:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", G_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 13)\n if step == 233:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", GA1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 237:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", GA2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 239:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 
A_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 34)\n if step == 273:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 277:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 279:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", D_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 22)\n if step == 301:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 305:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 307:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", E_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 5)\n if step == 312:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 316:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 318:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", F_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 31)\n if step == 349:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", FG1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 353:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", FG2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 355:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", G_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 18)\n if step == 373:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", GA1_4219)\n 
traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 377:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", GA2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 379:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", A_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 36)\n if step == 415:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 419:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 421:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", D_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 19)\n if step == 440:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 444:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 446:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", E_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 5)\n if step == 451:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 455:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 457:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", F_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 29)\n if step == 486:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", FG1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 490:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", FG2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 
r2g)\n if step == 492:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", G_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 18)\n if step == 510:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", GA1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 514:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", GA2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 516:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", A_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 41)\n if step == 557:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 561:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 563:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", D_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 19)\n if step == 582:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 586:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 588:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", E_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 8)\n if step == 596:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 0:\n traci.trafficlight.setPhase(\"gneJ41\", D_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 17)\n if step == 17:\n traci.trafficlight.setPhase(\"gneJ41\", DE1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 21:\n traci.trafficlight.setPhase(\"gneJ41\", DE2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 23:\n traci.trafficlight.setPhase(\"gneJ41\", E_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 9)\n if step == 32:\n traci.trafficlight.setPhase(\"gneJ41\", EA1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 36:\n 
traci.trafficlight.setPhase(\"gneJ41\", EA2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 38:\n traci.trafficlight.setPhase(\"gneJ41\", A_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 67)\n if step == 105:\n traci.trafficlight.setPhase(\"gneJ41\", AB1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 109:\n traci.trafficlight.setPhase(\"gneJ41\", AB2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 111:\n traci.trafficlight.setPhase(\"gneJ41\", B_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 19)\n if step == 130:\n traci.trafficlight.setPhase(\"gneJ41\", BD1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 134:\n traci.trafficlight.setPhase(\"gneJ41\", BD2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 136:\n traci.trafficlight.setPhase(\"gneJ41\", D_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 16)\n if step == 152:\n traci.trafficlight.setPhase(\"gneJ41\", DE1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 156:\n traci.trafficlight.setPhase(\"gneJ41\", DE2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 158:\n traci.trafficlight.setPhase(\"gneJ41\", E_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 11)\n if step == 169:\n traci.trafficlight.setPhase(\"gneJ41\", EA1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 173:\n traci.trafficlight.setPhase(\"gneJ41\", EA2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 175:\n traci.trafficlight.setPhase(\"gneJ41\", A_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 63)\n if step == 238:\n traci.trafficlight.setPhase(\"gneJ41\", AD1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 242:\n traci.trafficlight.setPhase(\"gneJ41\", AD2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 244:\n traci.trafficlight.setPhase(\"gneJ41\", D_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 13)\n if step == 257:\n traci.trafficlight.setPhase(\"gneJ41\", DE1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 261:\n traci.trafficlight.setPhase(\"gneJ41\", DE2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 263:\n traci.trafficlight.setPhase(\"gneJ41\", E_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 9)\n if step == 272:\n traci.trafficlight.setPhase(\"gneJ41\", EA1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 276:\n traci.trafficlight.setPhase(\"gneJ41\", EA2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 278:\n traci.trafficlight.setPhase(\"gneJ41\", A_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 95)\n if step == 373:\n traci.trafficlight.setPhase(\"gneJ41\", AB1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 377:\n traci.trafficlight.setPhase(\"gneJ41\", AB2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 379:\n traci.trafficlight.setPhase(\"gneJ41\", B_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 19)\n if step == 398:\n traci.trafficlight.setPhase(\"gneJ41\", BD1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 402:\n traci.trafficlight.setPhase(\"gneJ41\", BD2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 404:\n traci.trafficlight.setPhase(\"gneJ41\", D_4220)\n 
traci.trafficlight.setPhaseDuration(\"gneJ41\", 24)\n if step == 428:\n traci.trafficlight.setPhase(\"gneJ41\", DE1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 432:\n traci.trafficlight.setPhase(\"gneJ41\", DE2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 434:\n traci.trafficlight.setPhase(\"gneJ41\", E_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 13)\n if step == 447:\n traci.trafficlight.setPhase(\"gneJ41\", EA1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 451:\n traci.trafficlight.setPhase(\"gneJ41\", EA2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 453:\n traci.trafficlight.setPhase(\"gneJ41\", A_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 56)\n if step == 509:\n traci.trafficlight.setPhase(\"gneJ41\", AB1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 513:\n traci.trafficlight.setPhase(\"gneJ41\", AB2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 515:\n traci.trafficlight.setPhase(\"gneJ41\", B_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 19)\n if step == 534:\n traci.trafficlight.setPhase(\"gneJ41\", BD1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 538:\n traci.trafficlight.setPhase(\"gneJ41\", BD2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 540:\n traci.trafficlight.setPhase(\"gneJ41\", D_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 22)\n if step == 562:\n traci.trafficlight.setPhase(\"gneJ41\", DE1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 566:\n traci.trafficlight.setPhase(\"gneJ41\", DE2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 568:\n traci.trafficlight.setPhase(\"gneJ41\", E_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 20)\n if step == 588:\n traci.trafficlight.setPhase(\"gneJ41\", EA1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 592:\n traci.trafficlight.setPhase(\"gneJ41\", EA2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 594:\n traci.trafficlight.setPhase(\"gneJ41\", A_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 6)\n if step == 0:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", E_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 9)\n if step == 9:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 13:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 15:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", F_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 19)\n if step == 34:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 38:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 40:\n 
traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", A_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 20)\n if step == 60:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 64:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 66:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", D_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 24)\n if step == 90:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 94:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 96:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", E_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 9)\n if step == 105:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 109:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 111:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", F_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 19)\n if step == 130:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 134:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 136:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", A_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 30)\n if step == 166:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 170:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 172:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", D_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 28)\n if step == 200:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 204:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE2_4221)\n 
traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 206:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", E_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 19)\n if step == 225:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 229:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 231:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", F_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 8)\n if step == 239:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 243:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 245:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", A_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 32)\n if step == 277:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 281:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 283:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", D_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 27)\n if step == 310:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 314:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 316:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", E_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 15)\n if step == 331:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 335:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 337:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", F_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 14)\n if step == 351:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 
o2r)\n if step == 355:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 357:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", A_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 22)\n if step == 379:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 383:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 385:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", D_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 24)\n if step == 409:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 413:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 415:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", E_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 11)\n if step == 426:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 430:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 432:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", F_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 14)\n if step == 446:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 450:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 452:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", A_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 30)\n if step == 482:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 486:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 488:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", D_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 26)\n if step == 514:\n 
traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 518:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 520:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", E_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 8)\n if step == 528:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 532:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 534:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", F_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 18)\n if step == 552:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 556:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 558:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", A_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 19)\n if step == 577:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 581:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 583:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", D_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 9)\n if step == 592:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 596:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 598:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", E_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 2)\n\n step += 1\n\n traci.close()\n sys.stdout.flush()", "def __init__(self, n1, n2, mu1, mu2, r11, r12, r21, r22, L1, L2):\n self.n1 = n1\n self.n2 = n2\n self.mu1 = mu1\n self.mu2 = mu2\n self.r11 = r11\n self.r12 = r12\n self.r21 = r21\n self.r22 = r22\n self.L1 = L1\n self.L2 = L2\n self.State_Space = [(i, j) for i in range(self.n1+3) for j in range(self.n2+3) if i+j<=self.n1+self.n2+2] + [-1, -2, -3]\n self.write_transition_matrix()\n self.discretise_transition_matrix()", "def layer_sweep(self):\n for 
fixed_id, fixed_layer in enumerate(self.layers):\n if fixed_id + 1 == len(self.layers):\n break\n moving_layer = self.layers[fixed_id + 1]\n for node in moving_layer.nodes:\n self.find_neighbors(node)\n if len(node.neighbors) > 0:\n self.calculate_barycenter(node)\n else:\n node.barycenter = 0 #1000\n sorted_nodes = sorted(moving_layer.nodes, key=lambda n: n.barycenter, reverse=False)\n for slot, node in enumerate(sorted_nodes):\n node.slot = slot + 1\n barys = set([n.barycenter for n in sorted_nodes])\n bary_nodes = [list(filter(lambda x: x.barycenter == b, sorted_nodes)) for b in barys]\n for b in bary_nodes:\n if len(b) > 1:\n for node in b:\n if len(node.sl_neighbors) == 1:\n n_slot = node.sl_neighbors[0].slot\n if n_slot > node.slot:\n other_node = max(b, key=lambda s: s.slot)\n elif n_slot < node.slot:\n other_node = min(b, key=lambda s: s.slot)\n temp = node.slot\n node.slot = other_node.slot\n other_node.slot = temp\n sorted_nodes = sorted(moving_layer.nodes, key=lambda n: n.slot, reverse=False)\n moving_layer.nodes = sorted_nodes", "def nominalSensitivities():\n #Scan ranges\n ang = np.linspace(-5*.3e-3,5*.3e-3,100)\n tx = np.linspace(-.3,.3,100)\n\n #Mirror Pair Sensitivities\n pitch2 = [mirrorPair(1000,primalign=[0,0,0,0,a,0]) for a in ang]\n yaw2 = [mirrorPair(1000,primalign=[0,0,0,a,0,0]) for a in ang]\n plt.figure('Pair')\n plt.plot(ang*180/pi*60,pitch2,label='Pitch')\n plt.plot(ang*180/pi*60,yaw2,label='Yaw')\n plt.title('SLF Mirror Pair Alignment Sensitivities')\n plt.grid()\n plt.legend(loc='upper center')\n plt.xlabel('Angular Error (arcmin)')\n plt.ylabel('HPD (arcsec)')\n\n #Secondary Sensitivities\n pitch = [mirrorPair(1000,secalign=[0,0,0,0,a,0]) for a in ang/20]\n yaw = [mirrorPair(1000,secalign=[0,0,0,a,0,0]) for a in ang/20]\n roll = [mirrorPair(1000,secalign=[0,0,0,0,0,a]) for a in ang/20]\n plt.figure('SecondaryAng')\n plt.semilogy(ang/20.*180/pi*60**2,pitch,label='Pitch')\n plt.plot(ang/20.*180/pi*60**2,yaw,label='Yaw')\n plt.plot(ang/20.*180/pi*60**2,roll,label='Roll')\n plt.grid()\n plt.legend(loc='upper center')\n plt.xlabel('Angular Error (arcsec)')\n plt.ylabel('HPD (arcsec)')\n plt.xlim([-5,5])\n plt.ylim([0,3])\n plt.title('SLF Secondary Alignment Sensitivities')\n decenter = [mirrorPair(1000,secalign=[t,0,0,0,0,0]) for t in tx]\n lateral = [mirrorPair(1000,secalign=[0,t,0,0,0,0]) for t in tx]\n despace = [mirrorPair(1000,secalign=[0,0,t,0,0,0]) for t in tx]\n plt.figure('SecondaryTx')\n plt.semilogy(tx,decenter,label='Decenter')\n plt.plot(tx,despace,label='Despace')\n plt.plot(tx,lateral,label='Lateral')\n plt.grid()\n plt.legend(loc='upper center')\n plt.xlabel('Translation Error (mm)')\n plt.ylabel('HPD (arcsec)')\n plt.title('SLF Secondary Translation Sensitivities')\n \n \n #Compensating behavior...\n \n\n return [pitch2,yaw2,pitch,yaw,decenter,lateral,despace]", "def setup_pwn(name,pwndata,phase, free_radius=5, tempdir=None, emin=1.0e2, emax=1.0e5,maxroi=10,model=None,**kwargs):\n sources=yaml.load(open(pwndata))\n\n catalog_name=sources[name]['catalog']\n ltcube=sources[name]['ltcube']\n pulsar_position=SkyDir(*sources[name]['dir'])\n ft2=sources[name]['ft2']\n ft1=sources[name]['ft1']\n\n # in case no list was passed\n if len(phase)==2 and isinstance(phase[0],numbers.Real) and \\\n isinstance(phase[1],numbers.Real):\n\n # write in case phase wraps around.\n if phase[0]>phase[1]:\n phase=[[phase[0],1.0],[0.0,phase[1]]]\n else:\n phase = [phase] \n\n phase_factor=get_phase_factor(phase)\n print \"phase\"\n print phase\n print 
\"phase_factor=%.2f\"%phase_factor\n\n catalog=FermiCatalog(e(\"$FERMI/catalogs/gll_psc_v02.fit\"),free_radius=free_radius)\n catalog_source=[i for i in catalog.get_sources(SkyDir(),180) if i.name==catalog_name][0]\n\n center=catalog_source.skydir\n\n if tempdir is None: tempdir=mkdtemp(prefix='/scratch/')\n\n binfile=j(tempdir,'binned_phased.fits')\n\n # apply phase cut to ft1 file\n phased_ft1 = j(tempdir,'ft1_phased.fits')\n phasetools.phase_cut(ft1,phased_ft1,phaseranges=phase)\n\n # create a temporary ltcube scaled by the phase factor\n# phased_ltcube=j(tempdir,'phased_ltcube.fits')\n# phase_ltcube(ltcube,phased_ltcube, phase=phase)\n phased_ltcube=ltcube\n from uw.like.pointspec import DataSpecification\n data_specification = DataSpecification(\n ft1files = phased_ft1,\n ft2files = ft2,\n ltcube = phased_ltcube,\n binfile = binfile)\n\n spectral_analysis = SpectralAnalysis(data_specification,\n binsperdec = 4,\n emin = 100,\n emax = 100000,\n irf = \"P6_V3_DIFFUSE\",\n roi_dir = center,\n maxROI = maxroi,\n minROI = maxroi)\n\n if model == None :\n roi=spectral_analysis.roi(\n roi_dir=center,\n diffuse_sources=get_default_diffuse(diffdir=e(\"$FERMI/diffuse\"),\n gfile=\"gll_iem_v02.fit\",\n ifile=\"isotropic_iem_v02.txt\"),\n catalogs = catalog,\n phase_factor = 1.0,\n fit_emin = [emin,emin],\n fit_emax = [emax,emax],\n **kwargs)\n else :\n roi=spectral_analysis.roi(\n roi_dir=center,\n xmlfile = model,\n phase_factor =1.0,\n fit_emin = [emin,emin],\n fit_emax = [emax,emax],\n **kwargs)\n\n print \"---------------------Energy range--------------------\"\n \n print \"emin=\"+str(roi.bands[0].emin)+\"\\n\"\n print \"emax=\"+str(roi.bands[len(roi.bands)-1].emax)+\"\\n\"\n \n\n # keep overall flux of catalog source,\n # but change the starting index to 2.\n roi.modify(which=catalog_name, name=name, index=2, \n keep_old_flux=True)\n\n return roi", "def solve_prep(self):\n\n par = self.par\n sol = self.sol\n\n # a. retirement\n sol.m_ret = np.zeros((par.T,par.Nm_ret))\n sol.c_ret = np.zeros((par.T,par.Nm_ret))\n sol.a_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_v_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vm_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vn_ret = np.zeros((par.T,par.Nm_ret))\n\n # b. 
working\n if par.solmethod == 'G2EGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.ucon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.dcon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.acon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.z = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n \n elif par.solmethod == 'NEGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((0,0,0))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((0,0,0))\n \n sol.c_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))\n sol.inv_v_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))", "def find_transition_rates(self, state1, state2):\n if state1 in [-1, -2, -3]:\n return 0\n if state2 == -3:\n if state1[0] == self.n1 and state1[1] == self.n2 + 2:\n return self.r21 * self.mu2\n if state1[0] == self.n1 + 2 and state1[1] == self.n2:\n return self.r12 * self.mu1\n else:\n return 0\n elif state2 == -1:\n if state1[0] >= self.n1+1 and state1[1] < self.n2+2:\n return self.r11*self.mu1\n else:\n return 0\n elif state2 == -2:\n if state1[1] >= self.n2+1 and state1[0] < self.n1+2:\n return self.r22*self.mu2\n else:\n return 0\n else:\n delta = (state2[0] - state1[0], state2[1] - state1[1])\n if delta == (1, 0):\n if state1[0] < self.n1 + 1:\n return self.L1\n return 0\n if delta == (0, 1):\n if state1[1] < self.n2 + 1:\n return self.L2\n return 0\n if delta == (-1, 0):\n if state1[1] < self.n2 + 2:\n return (1 - self.r12 - self.r11) * self.mu1\n return 0\n if delta == (0, -1):\n if state1[0] < self.n1 + 2:\n return (1 - self.r21 - self.r22) * self.mu2\n return 0\n if delta == (-1, 1):\n if state1[1] < self.n2 + 2 and (state1[0], state1[1]) != (self.n1+2, self.n2):\n # if state1[1] < self.n2 + 2:\n return self.r12 * self.mu1\n return 0\n if delta == (1, -1):\n if state1[0] < self.n1 + 2 and (state1[0], state1[1]) != (self.n1, self.n2+2):\n # if state1[0] < self.n1 + 2:\n return self.r21 * self.mu2\n return 0\n return 0", "def connect_forward_and_back_v1(simulation_dict, (index0, blocks_per_dim0, predicted_array, predicted_array_t2), (index1, blocks_per_dim1), square_size, radius, context_factor):\n hidden_size = simulation_dict['hidden_size']\n dx = hidden_size\n dy = hidden_size\n logging.info(\"Connecting from index %d to index %d\" % (index0, index1))\n logging.info(\"Input layer size is %d, receiving layer size is %d\" % (blocks_per_dim0, blocks_per_dim1))\n logging.info(\"Radius of connectivity %d\" % radius)\n for x in range(blocks_per_dim1):\n for y in range(blocks_per_dim1):\n surround = get_fan_in((x, y),\n dim_x_l=blocks_per_dim0,\n dim_y_l=blocks_per_dim0,\n 
dim_x_u=blocks_per_dim1,\n dim_y_u=blocks_per_dim1,\n block_x=square_size,\n block_y=square_size,\n radius=radius)\n dest = index1 + x * (blocks_per_dim1) + y # destination unit\n for xy in surround:\n source = index0 + xy[0] * blocks_per_dim0 + xy[1] # source unit\n # Prepare the input and corresponding delta block at source\n input_block = simulation_dict['stage0'][source]['output_block']\n delta_block = SharedArray.SharedNumpyArray_like(input_block)\n simulation_dict['stage0'][source]['delta_blocks'].append(delta_block)\n # Prepare the context and corresonding delta block at destination\n context_block = simulation_dict['stage0'][dest]['output_block']\n delta_block2 = SharedArray.SharedNumpyArray_like(context_block)\n simulation_dict['stage0'][dest]['delta_blocks'].append(delta_block2)\n # Connect the context block to the source\n simulation_dict['stage0'][source]['context_blocks'].append((context_block, delta_block2, context_factor))\n # Prepare the predicted blocks\n xx = xy[0]*hidden_size\n yy = xy[1]*hidden_size\n assert(predicted_array[xx:xx+dx, yy:yy+dy].shape == context_block.shape)\n predicted_block = SharedArray.DynamicView(predicted_array)[xx:xx+dx, yy:yy+dy]\n predicted_block2 = SharedArray.DynamicView(predicted_array_t2)[xx:xx+dx, yy:yy+dy]\n if not (predicted_block.shape == (dx, dy)):\n print predicted_block.shape\n raise\n # Connect the input to the destination together with its predicted blocks and so on.\n simulation_dict['stage0'][dest]['signal_blocks'].append((input_block, delta_block, predicted_block, predicted_block2))", "def efSolver2(self):\n dx = self.dh[0] # dx\n dy = self.dh[1] # dy\n dz = self.dh[2] # dz\n \n \"\"\"\n for i in np.arange(0, self.ni):\n for j in np.arange(0, self.nj):\n for k in np.arange(0, self.nk):\n \"\"\"\n\n ##x-component#\n #if i==0: \n #x-component#\n \"\"\"\n if i==0: \n # forward\n self.ef[i][j][k][0] = -(-3*self.phi[i][j][k]+\\\n 4*self.phi[i+1][j][k]-\\\n self.phi[i+2][j][k])/(2*dx)\n \"\"\"\n \n # forward\n self.ef[0,0:self.nj,0:self.nk,0] = -(-3*self.phi[0,0:self.nj,0:self.nk]+\\\n 4*self.phi[1,0:self.nj,0:self.nk]-\\\n self.phi[2,0:self.nj,0:self.nk])/(2*dx)\n \n #elif i==self.ni-1: \n \"\"\"\n elif i==self.ni-1: \n # backward\n self.ef[i][j][k][0] = -(self.phi[i-2][j][k]-\\\n 4*self.phi[i-1][j][k]+\\\n 3*self.phi[i][j][k])/(2*dx)\n \"\"\" \n # backward\n self.ef[self.ni-1,0:self.nj,0:self.nk,0] = -(self.phi[self.ni-3,0:self.nj,0:self.nk]-\\\n 4*self.phi[self.ni-2,0:self.nj,0:self.nk]+\\\n 3*self.phi[self.ni-1,0:self.nj,0:self.nk])/(2*dx)\n \"\"\"\n else: \n #central\n self.ef[i][j][k][0] = -(self.phi[i+1][j][k] - \\\n self.phi[i-1][j][k])/(2*dx)\n \"\"\" \n #central\n self.ef[1:self.ni-1,0:self.nj,0:self.nk,0] = -(self.phi[2:self.ni,0:self.nj,0:self.nk] - \\\n self.phi[0:self.ni-2,0:self.nj,0:self.nk])/(2*dx)\n\n\n #y-component\n #if j==0:\n \"\"\"\n if j==0:\n self.ef[i][j][k][1] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j+1][k]-\\\n self.phi[i][j+2][k])/(2*dy)\n \n \"\"\"\n self.ef[0:self.ni,0,0:self.nk,1] = -(-3*self.phi[0:self.ni,0,0:self.nk] + \\\n 4*self.phi[0:self.ni,1,0:self.nk]-\\\n self.phi[0:self.ni,2,0:self.nk])/(2*dy)\n #elif j==self.nj-1:\n \"\"\"\n elif j==self.nj-1:\n self.ef[i][j][k][1] = -(self.phi[i][j-2][k] - \\\n 4*self.phi[i][j-1][k] +\\\n 3*self.phi[i][j][k])/(2*dy)\n \n \"\"\"\n self.ef[0:self.ni,self.nj-1,0:self.nk,1] = -(self.phi[0:self.ni,self.nj-3,0:self.nk] - \\\n 4*self.phi[0:self.ni,self.nj-2,0:self.nk] +\\\n 3*self.phi[0:self.ni,self.nj-1,0:self.nk])/(2*dy)\n #else:\n \"\"\"\n else:\n 
self.ef[i][j][k][1] = -(self.phi[i][j+1][k] - \\\n self.phi[i][j-1][k])/(2*dy)\n\n \"\"\"\n self.ef[0:self.ni,1:self.nj-1,0:self.nk,1] = -(self.phi[0:self.ni,2:self.nj,0:self.nk] - \\\n self.phi[0:self.ni,0:self.nj-2,0:self.nk])/(2*dy)\n\n #z-component\n '''\n if k==0:\n self.ef[i][j][k][2] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j][k+1]-\n self.phi[i][j][k+2])/(2*dz)\n \n '''\n #z-component\n #if k==0:\n self.ef[0:self.ni,0:self.nj,0,2] = -(-3*self.phi[0:self.ni,0:self.nj,0] + \\\n 4*self.phi[0:self.ni,0:self.nj,1]-\n self.phi[0:self.ni,0:self.nj,2])/(2*dz)\n\n \"\"\"\n elif k==self.nk-1:\n self.ef[i][j][k][2] = -(self.phi[i][j][k-2] - \\\n 4*self.phi[i][j][k-1] + \\\n 3*self.phi[i][j][k])/(2*dz)\n \"\"\"\n \n #elif k==self.nk-1:\n self.ef[0:self.ni,0:self.nj,self.nk-1,2] = -(self.phi[0:self.ni,0:self.nj,self.nk-3] - \\\n 4*self.phi[0:self.ni,0:self.nj,self.nk-2] + \\\n 3*self.phi[0:self.ni,0:self.nj,self.nk-1])/(2*dz) \n \"\"\"\n else:\n self.ef[i][j][k][2] = -(self.phi[i][j][k+1] - \\\n self.phi[i][j][k-1])/(2*dz)\n \"\"\"\n #else:\n self.ef[0:self.ni,0:self.nj,1:self.nk-1,2] = -(self.phi[0:self.ni,0:self.nj,2:self.nk] - \\\n self.phi[0:self.ni,0:self.nj,0:self.nk-2])/(2*dz)", "def frame3dlin_KeMe(E,G,Kv1,Kv2,A1,A2,Iy1,Iy2,Iz1,Iz2,L,me1,me2,R=None):\n # --- Stifness matrix\n ke = np.array([\n [((A2+A1)*E)/(2*L) , 0 , 0 , 0 , 0 , 0 , -((A2+A1)*E)/(2*L) , 0 , 0 , 0 , 0 , 0] , \n [0 , ((6*Iz2+6*Iz1)*E)/L**3 , 0 , 0 , 0 , ((2*Iz2+4*Iz1)*E)/L**2 , 0 , -((6*Iz2+6*Iz1)*E)/L**3 , 0 , 0 , 0 , ((4*Iz2+2*Iz1)*E)/L**2] , \n [0 , 0 , ((6*Iy2+6*Iy1)*E)/L**3 , 0 , -((2*Iy2+4*Iy1)*E)/L**2 , 0 , 0 , 0 , -((6*Iy2+6*Iy1)*E)/L**3 , 0 , -((4*Iy2+2*Iy1)*E)/L**2 , 0] , \n [0 , 0 , 0 , ((Kv2+Kv1)*G)/(2*L) , 0 , 0 , 0 , 0 , 0 , -((Kv2+Kv1)*G)/(2*L) , 0 , 0] , \n [0 , 0 , -((2*Iy2+4*Iy1)*E)/L**2 , 0 , ((Iy2+3*Iy1)*E)/L , 0 , 0 , 0 , ((2*Iy2+4*Iy1)*E)/L**2 , 0 , ((Iy2+Iy1)*E)/L , 0] , \n [0 , ((2*Iz2+4*Iz1)*E)/L**2 , 0 , 0 , 0 , ((Iz2+3*Iz1)*E)/L , 0 , -((2*Iz2+4*Iz1)*E)/L**2 , 0 , 0 , 0 , ((Iz2+Iz1)*E)/L] , \n [-((A2+A1)*E)/(2*L) , 0 , 0 , 0 , 0 , 0 , ((A2+A1)*E)/(2*L) , 0 , 0 , 0 , 0 , 0] , \n [0 , -((6*Iz2+6*Iz1)*E)/L**3 , 0 , 0 , 0 , -((2*Iz2+4*Iz1)*E)/L**2 , 0 , ((6*Iz2+6*Iz1)*E)/L**3 , 0 , 0 , 0 , -((4*Iz2+2*Iz1)*E)/L**2] , \n [0 , 0 , -((6*Iy2+6*Iy1)*E)/L**3 , 0 , ((2*Iy2+4*Iy1)*E)/L**2 , 0 , 0 , 0 , ((6*Iy2+6*Iy1)*E)/L**3 , 0 , ((4*Iy2+2*Iy1)*E)/L**2 , 0] , \n [0 , 0 , 0 , -((Kv2+Kv1)*G)/(2*L) , 0 , 0 , 0 , 0 , 0 , ((Kv2+Kv1)*G)/(2*L) , 0 , 0] , \n [0 , 0 , -((4*Iy2+2*Iy1)*E)/L**2 , 0 , ((Iy2+Iy1)*E)/L , 0 , 0 , 0 , ((4*Iy2+2*Iy1)*E)/L**2 , 0 , ((3*Iy2+Iy1)*E)/L , 0] , \n [0 , ((4*Iz2+2*Iz1)*E)/L**2 , 0 , 0 , 0 , ((Iz2+Iz1)*E)/L , 0 , -((4*Iz2+2*Iz1)*E)/L**2 , 0 , 0 , 0 , ((3*Iz2+Iz1)*E)/L]\n ])\n # --- Mass matrix\n me = np.array([\n [(me2+3*me1)/12 , 0 , 0 , 0 , 0 , 0 , (me2+me1)/12 , 0 , 0 , 0 , 0 , 0] , \n [0 , (3*me2+10*me1)/35 , 0 , 0 , 0 , (7*L*me2+15*L*me1)/420 , 0 , (9*me2+9*me1)/140 , 0 , 0 , 0 , -(6*L*me2+7*L*me1)/420] , \n [0 , 0 , (3*me2+10*me1)/35 , 0 , -(7*L*me2+15*L*me1)/420 , 0 , 0 , 0 , (9*me2+9*me1)/140 , 0 , (6*L*me2+7*L*me1)/420 , 0] , \n [0 , 0 , 0 , (me2+3*me1)/12 , 0 , 0 , 0 , 0 , 0 , (me2+me1)/12 , 0 , 0] , \n [0 , 0 , -(7*L*me2+15*L*me1)/420 , 0 , (3*L**2*me2+5*L**2*me1)/840 , 0 , 0 , 0 , -(7*L*me2+6*L*me1)/420 , 0 , -(L**2*me2+L**2*me1)/280 , 0] , \n [0 , (7*L*me2+15*L*me1)/420 , 0 , 0 , 0 , (3*L**2*me2+5*L**2*me1)/840 , 0 , (7*L*me2+6*L*me1)/420 , 0 , 0 , 0 , -(L**2*me2+L**2*me1)/280] , \n [(me2+me1)/12 , 0 , 0 , 0 , 0 , 0 , (3*me2+me1)/12 , 0 , 0 , 0 , 0 , 0] , \n [0 , 
(9*me2+9*me1)/140 , 0 , 0 , 0 , (7*L*me2+6*L*me1)/420 , 0 , (10*me2+3*me1)/35 , 0 , 0 , 0 , -(15*L*me2+7*L*me1)/420] , \n [0 , 0 , (9*me2+9*me1)/140 , 0 , -(7*L*me2+6*L*me1)/420 , 0 , 0 , 0 , (10*me2+3*me1)/35 , 0 , (15*L*me2+7*L*me1)/420 , 0] , \n [0 , 0 , 0 , (me2+me1)/12 , 0 , 0 , 0 , 0 , 0 , (3*me2+me1)/12 , 0 , 0] , \n [0 , 0 , (6*L*me2+7*L*me1)/420 , 0 , -(L**2*me2+L**2*me1)/280 , 0 , 0 , 0 , (15*L*me2+7*L*me1)/420 , 0 , (5*L**2*me2+3*L**2*me1)/840 , 0] , \n [0 , -(6*L*me2+7*L*me1)/420 , 0 , 0 , 0 , -(L**2*me2+L**2*me1)/280 , 0 , -(15*L*me2+7*L*me1)/420 , 0 , 0 , 0 , (5*L**2*me2+3*L**2*me1)/840]\n ])\n\n if (R is not None):\n RR = scipy.linalg.block_diag(R,R,R,R)\n me = np.transpose(RR).dot(me.dot(RR))\n ke = np.transpose(RR).dot(ke.dot(RR))\n\n return ke, me", "def two_bs2x4_transform_opt(t1, r1, t2, r2, input_state):\n size = len(input_state)\n out = np.zeros((size,) * 4, dtype=complex)\n\n def coef(k1, k2, k3, k4):\n return t1 ** k2 * (1j * r1) ** k1 * t2 ** k4 * (1j * r2) ** k3 / (factorial(k1) * factorial(k2) * factorial(k3) * factorial(k4))\n\n # index 'i' = (m,n,k,l)\n for i in np.ndindex(size, size, size, size):\n if i[2] <= i[0] and i[3] <= i[1] and i[0] + i[1] < size:\n out[i[2], i[0] - i[2], i[3], i[1] - i[3]] = coef(i[2], i[0] - i[2], i[3], i[1] - i[3]) * input_state[i[0], i[1]] * factorial(i[0]) * factorial(i[1])\n\n return out", "def main():\n \n #\n # Initialization\n #\n ref_time = time.time()\n output_string = '' \n cv2.namedWindow('frame', cv2.WINDOW_GUI_NORMAL+cv2.WINDOW_AUTOSIZE)\n \n #\n # Open the capture device and print some\n # useful properties\n #\n cap = cv2.VideoCapture(0)\n if cap.isOpened():\n #cap.set(cv.CV_CAP_PROP_FRAME_WIDTH, 320)\n #cap.set(cv.CV_CAP_PROP_FRAME_HEIGHT, 240)\n \n frameWidth = cap.get(cv.CV_CAP_PROP_FRAME_WIDTH)\n frameHeight = cap.get(cv.CV_CAP_PROP_FRAME_HEIGHT)\n \n print 'frame: width {}, height {}'.format(frameWidth, frameHeight)\n\n #\n # Parameters for Lucas-Kanade optical flow\n #\n lk_params = dict( winSize = (15,15),\n maxLevel = 2,\n criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n\n #\n # Predefine points to track\n #\n track_points = np.array([[[220.0, 120.0]],\n [[220.0, 200.0]],\n [[220.0, 280.0]],\n [[220.0, 360.0]],\n [[420.0, 120.0]],\n [[420.0, 200.0]],\n [[420.0, 280.0]],\n [[420.0, 360.0]]], 'float32')\n \n #\n # Take first frame and find corners in it\n #\n cap_ok, frame = cap.read()\n if not cap_ok:\n sys.exit()\n\n prev_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n print 'rel_time,p0dx,p0dy,p1dx,p1dy,p2dx,p2dy,p3dx,p3dy,p4dx,p4dy,p5dx,p5dy,p6dx,p6dy,p7dx,p7dy'\n\n while(True):\n\n cap_ok, frame = cap.read()\n if not cap_ok:\n break\n \n curr_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n #\n # Calculate optical flow\n #\n next_points, st, err = cv2.calcOpticalFlowPyrLK(prev_frame, curr_frame, track_points, None, **lk_params)\n\n #\n # Iterate through points and display on video frame\n # as well as output a CSV formated value list\n #\n for point_index in range(0, track_points.shape[0]):\n \n #\n # Display results on video frame\n #\n track_point = np.int0(track_points[point_index])\n x0,y0 = track_point.ravel()\n cv2.circle(frame, (x0,y0), 5, (0,255,0), -1)\n\n next_point = np.int0(next_points[point_index])\n x1,y1 = next_point.ravel()\n cv2.circle(frame, (x1,y1), 5, (0,0,255), -1)\n\n #\n # Build CSV string\n #\n output_string += ',{:.2f},{:.2f}'.format(x0-x1, y0-y1)\n \n #\n # Print out some data in a CSV format for graphing\n #\n now = time.time() - ref_time \n print 
'{:.2f}{}'.format(now, output_string)\n output_string = ''\n\n #\n # Display result and check for escape key\n #\n cv2.imshow('frame',frame)\n k = cv2.waitKey(1) & 0xff\n if k == 27:\n break\n\n #\n # Now update the previous frame and previous points\n #\n prev_frame = curr_frame.copy()\n\n cv2.destroyAllWindows()\n cap.release()", "def phosphorene_4band():\n a = 0.222\n ax = 0.438\n ay = 0.332\n theta = 96.79 * (pi / 180)\n phi = 103.69 * (pi / 180)\n\n lat = pb.Lattice(a1=[ax, 0], a2=[0, ay])\n\n h = a * sin(phi - pi / 2)\n s = 0.5 * ax - a * cos(theta / 2)\n lat.add_sublattices(\n ('A', [-s/2, -ay/2, h], 0),\n ('B', [ s/2, -ay/2, 0], 0),\n ('C', [-s/2 + ax/2, 0, 0], 0),\n ('D', [ s/2 + ax/2, 0, h], 0)\n )\n\n lat.register_hopping_energies({\n 't1': -1.22,\n 't2': 3.665,\n 't3': -0.205,\n 't4': -0.105,\n 't5': -0.055\n })\n\n lat.add_hoppings(\n # t1\n ([-1, 0], 'A', 'D', 't1'),\n ([-1, -1], 'A', 'D', 't1'),\n ([ 0, 0], 'B', 'C', 't1'),\n ([ 0, -1], 'B', 'C', 't1'),\n # t2\n ([ 0, 0], 'A', 'B', 't2'),\n ([ 0, 0], 'C', 'D', 't2'),\n # t3\n ([ 0, 0], 'A', 'D', 't3'),\n ([ 0, -1], 'A', 'D', 't3'),\n ([ 1, 1], 'C', 'B', 't3'),\n ([ 1, 0], 'C', 'B', 't3'),\n # t4\n ([ 0, 0], 'A', 'C', 't4'),\n ([ 0, -1], 'A', 'C', 't4'),\n ([-1, 0], 'A', 'C', 't4'),\n ([-1, -1], 'A', 'C', 't4'),\n ([ 0, 0], 'B', 'D', 't4'),\n ([ 0, -1], 'B', 'D', 't4'),\n ([-1, 0], 'B', 'D', 't4'),\n ([-1, -1], 'B', 'D', 't4'),\n # t5\n ([-1, 0], 'A', 'B', 't5'),\n ([-1, 0], 'C', 'D', 't5')\n )\n\n return lat", "def two_bs2x4_transform(t1, r1, t2, r2, input_state):\n size = len(input_state)\n output_state = np.zeros((size,) * 4, dtype=complex)\n for m in range(size):\n for n in range(size):\n\n for k in range(m + 1):\n for l in range(n + 1):\n # channels indexes\n ind1 = k\n ind2 = m - k\n ind3 = l\n ind4 = n - l\n coeff = input_state[m, n] * t1**(m - k) * (1j*r1)**k * t2**(n - l) * (1j*r2)**l * factorial(m) * factorial(n) / (factorial(k) * factorial(m - k) * factorial(l) * factorial(n - l))\n output_state[ind1, ind2, ind3, ind4] = output_state[ind1, ind2, ind3, ind4] + coeff\n\n return output_state", "def nodal2D_steady_fixed_source(Dims,Lengths,BCs,D,Sigma,Q, tolerance=1.0e-12, phi_solution=0., LOUD=False, maxits=100):\n I = Dims[0]\n J = Dims[1]\n K = Dims[2]\n L = I*J*K\n Nx = Lengths[0]\n Ny = Lengths[1]\n Nz = Lengths[2]\n \n hx,hy,hz = np.array(Lengths)/np.array(Dims)\n ihx2,ihy2,ihz2 = (1.0/hx**2,1.0/hy**2,1.0/hz**2)\n\n if (type(phi_solution) != np.ndarray):\n phi_solution = np.zeros((2,I,J,5))\n phi_new = phi_solution.copy()\n iteration = 1\n converged = 0\n localBCs = np.ones((2,3))\n\n #reshape Q if necessary\n if Q.shape != (I,J,K,5):\n Q_new = np.zeros((I,J,K,5))\n Q_new[:,:,:,0] = Q[:,:,:]\n Q = Q_new\n\n #iterate over the x directions\n k=0\n while not(converged):\n \n #Solve for x direction\n d = 0 #solv direction\n tr_id = 1 #trans direction idx in array\n for j in range(J): #spatial loop over J coordinates\n for i in range(I): #spatial loop over X coordinates\n\n if not(i==0):\n phi_left = phi_solution[d,i-1,j,:]\n C = positive_current(phi_left,hx/2,hx,D[i-1,j,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[0,0:3] = [0.25,-D[i,j,k]/2,C]\n else:\n localBCs[0,:] = BCs[0,:].copy()\n localBCs[0,1] *= D[i,j,k]\n if not(i==(I-1)):\n phi_rt = phi_solution[d,i+1,j,:]\n C = negative_current(phi_rt,-hx/2,hx,D[i+1,j,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[1,0:3] = [.25,D[i,j,k]/2,C]\n else:\n localBCs[1,:] = BCs[1,:].copy()\n localBCs[1,1] *= D[i,j,k]\n \n #Compute transverse fluxes\n if i==0:\n nbr_ids = 
[i,i,i+1] #Assume constant along left edge\n elif i==(I-1):\n nbr_ids = [i-1,i,i] #assume constant along right edge\n else:\n nbr_ids = [i-1,i,i+1] #interior cell\n\n if not j==(J-1):\n top_phis = phi_solution[tr_id,nbr_ids,j,:]\n top_Ds = D[nbr_ids,j,k]\n Ltop_quad = transverse_leakage_dof(top_phis,hy/2.,hy,hx,top_Ds)\n else:\n top_phis = phi_solution[tr_id,nbr_ids,j,:]\n top_Ds = D[nbr_ids,j,k]\n Ltop_quad = transverse_leakage_dof(top_phis,hy/2.,hy,hx,top_Ds)\n #Ltop_quad = (0., 0, 0)\n\n if not j==0:\n bot_phis = phi_solution[tr_id,nbr_ids,j,:]\n bot_Ds = D[nbr_ids,j,k]\n Lbot_quad = transverse_leakage_dof(bot_phis,-hy/2.,hy,hx,bot_Ds)\n else:\n bot_phis = phi_solution[tr_id,nbr_ids,j,:]\n bot_Ds = D[nbr_ids,j,k]\n Lbot_quad = transverse_leakage_dof(bot_phis,-hy/2.,hy,hx,bot_Ds)\n #Lbot_quad = (0.,0,0)\n\n #Add leakages to the Q_local terms\n# print(\"\\n X Information for element: \",i,j)\n# print(\"\\nThe source is: \",Q[i,j,k,0])\n\n Q_local = np.array(Q[i,j,k,:])\n for dof in range(len(Ltop_quad)):\n Q_local[dof] -= 1/hy*(Ltop_quad[dof] - Lbot_quad[dof])\n\n# print(\"The transverse leakage magnitude is: \",-1./hy*(Ltop_quad[0] - Lbot_quad[0]))\n# print(\"Total RHS: \", Q_local[0], Q_local[1])\n\n #Compute the new x fluxes\n phi_new[0,i,j,:] = single_node1GVacuum(D[i,j,k],Sigma[i,j,k],Q_local,hx,localBCs)\n phi,a1,a2,a3,a4 = phi_new[0,i,j,:]\n# print(\"The reaction magnitude: \", phi_new[0,i,j,0]*Sigma[i,j,k])\n# print(\"The current magnitude: \",1./hx*(current(phi_new[0,i,j,:],hx/2,hx,D[i,j,k]) - current(phi_new[0,i,j,:],-hx/2,hx,D[i,j,k])))\n# print(\"\")\n\n #print(i,\"incoming current on left =\", localBCs[0,2],positive_current(phi_new[i,:],-h/2,h,D[i]) )\n if 0*(i>0):\n print(i,\"outgoing current on left =\", negative_current(phi_new[0,i-1,j,:],hx/2,hx,D[i-1,j,k]),\n negative_current(phi_new[0,i,j,:],-hx/2,hx,D[i,j,k]) )\n if 0*(i<I-1):\n print(i,\"outgoing current on right =\", positive_current(phi_new[0,i+1,j,:],-hx/2,hx,D[i+1,j,k]),\n positive_current(phi_new[0,i,j,:],hx/2,hx,D[i,j,k]) )\n #print(i,\"incoming current on right =\", localBCs[1,2],negative_current(phi_new[i,:],h/2,h,D[i]) )\n #print(\"zone \",i,\" current in at right:\",localBCs[1,2],\" current out at right:\",current_left)\n\n \n #Solve for y direction\n d = 1 #solv direction\n tr_id = 0 #trans direction idx in array\n for j in range(J): #spatial loop over J coordinates\n for i in range(I): #spatial loop over X coordinates\n\n if not(j==0):\n phi_left = phi_solution[d,i,j-1,:]\n C = positive_current(phi_left,hy/2,hy,D[i,j-1,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[0,0:3] = [0.25,-D[i,j,k]/2,C]\n else:\n localBCs[0,:] = BCs[2,:].copy()\n localBCs[0,1] *= D[i,j,k]\n if not(j==(J-1)):\n phi_rt = phi_solution[d,i,j+1,:]\n C = negative_current(phi_rt,-hy/2,hy,D[i,j+1,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[1,0:3] = [.25,D[i,j,k]/2,C]\n else:\n localBCs[1,:] = BCs[3,:].copy()\n localBCs[1,1] *= D[i,j,k]\n \n #Compute transverse fluxes\n if j==0:\n nbr_ids = [j,j,j+1] #Assume constant along left edge\n elif j==(J-1):\n nbr_ids = [j-1,j,j] #assume constant along right edge\n else:\n nbr_ids = [j-1,j,j+1] #interior cell\n\n if not i==(I-1):\n rgt_phis = phi_solution[tr_id,i,nbr_ids,:]\n rgt_Ds = D[i,nbr_ids,k]\n Lrgt_quad = transverse_leakage_dof(rgt_phis,hx/2.,hx,hy,rgt_Ds)\n# print(\"Leakage right\",Lrgt_quad)\n# print(\"Just the right leakage\",current(phi_solution[0,i,j,:],hx/2.,hx,D[i,j,k]))\n# print(\"Right outflow, inflow\",positive_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]),\n# 
negative_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]))\n else:\n rgt_phis = phi_solution[tr_id,i,nbr_ids,:]\n rgt_Ds = D[i,nbr_ids,k]\n Lrgt_quad = transverse_leakage_dof(rgt_phis,hx/2.,hx,hy,rgt_Ds)\n# print(\"Leakage right\",Lrgt_quad)\n# print(\"Just the right leakage\",current(phi_solution[0,i,j,:],hx/2.,hx,D[i,j,k]))\n# print(\"Right outflow, inflow\",positive_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]),\n# negative_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]))\n\n if not i==0:\n lft_phis = phi_solution[tr_id,i,nbr_ids,:]\n lft_Ds = D[i,nbr_ids,k]\n Llft_quad = transverse_leakage_dof(lft_phis,-hx/2.,hx,hy,lft_Ds)\n else:\n lft_phis = phi_solution[tr_id,i,nbr_ids,:]\n lft_Ds = D[i,nbr_ids,k]\n Llft_quad = transverse_leakage_dof(lft_phis,-hx/2.,hx,hy,lft_Ds)\n #Llft_quad = (0.,0,0)\n\n #Add leakages to the Q_local terms\n Q_local = np.array(Q[i,j,k,:])\n# print(\"\\n Y Information for element: \",i,j)\n# print(\"\\nThe source is: \",Q[i,j,k,0])\n for dof in range(len(Lrgt_quad)):\n Q_local[dof] -= 1/hx*(Lrgt_quad[dof] - Llft_quad[dof])\n# print(\"The transverse leakage magnitude is: \",-1./hx*(Lrgt_quad[0] - Llft_quad[0]))\n# print(\"Total RHS: \", Q_local[0], Q_local[1])\n\n phi_new[1,i,j,:] = single_node1GVacuum(D[i,j,k],Sigma[i,j,k],Q_local,hy,localBCs)\n# print(\"The reaction magnitude: \", phi_new[1,i,j,0]*Sigma[i,j,k])\n# print(\"The current magnitude: \",1./hy*(current(phi_new[1,i,j,:],hy/2,hy,D[i,j,k]) - current(phi_new[1,i,j,:],-hy/2,hy,D[i,j,k])))\n# print(\"\")\n phi,a1,a2,a3,a4 = phi_new[1,i,j,:]\n #print(i,\"incoming current on left =\", localBCs[0,2],positive_current(phi_new[i,:],-h/2,h,D[i]) )\n if 0*(i>0):\n print(i,\"outgoing current on left =\", negative_current(phi_new[i-1,:],h/2,h,D[i]),negative_current(phi_new[i,:],-h/2,h,D[i]) )\n if 0*(i<I-1):\n print(i,\"outgoing current on right =\", positive_current(phi_new[i+1,:],-h/2,h,D[i]),positive_current(phi_new[i,:],h/2,h,D[i]) )\n #print(i,\"incoming current on right =\", localBCs[1,2],negative_current(phi_new[i,:],h/2,h,D[i]) )\n #print(\"zone \",i,\" current in at right:\",localBCs[1,2],\" current out at right:\",current_left)\n\n# print(\"X solution\", phi_new[0,:,:,0])\n# print(\"Y solution\", phi_new[1,:,:,0])\n\n #Compute total change in x and y\n relchange = np.linalg.norm( np.reshape(phi_new-phi_solution, 5*I*J*K*2))/np.linalg.norm( np.reshape(phi_new, 5*I*J*K*2))\n reldiff = np.linalg.norm( np.reshape(phi_new[0,:,:,0] - phi_new[1,:,:,0], I*J*K)/np.linalg.norm( np.reshape(phi_new[0,:,:,0],I*J*K)) )\n converged = (relchange < tolerance) or (iteration >= maxits)\n if (LOUD):\n print(\"Iteration\",iteration,\": relative change total =\",relchange,\"relative difference X Y\",reldiff)\n iteration += 1 \n phi_solution = phi_new.copy()\n\n\n x = np.linspace(hx*.5,Nx-hx*.5,I)\n y = np.linspace(hy*.5,Ny-hy*.5,J)\n z = np.linspace(hz*.5,Nz-hz*.5,K)\n return x,y,z,phi_solution[0,:,:,0].reshape(I,J,1)#+phi_solution[1,:,:,0].reshape(I,J,1)))", "def hinf_project_pole_alloc(A, B1, B2, C1, C2, D11, D12, D21, D22, q, r, solver=cvx.SCS):\n \n assert r > 0, 'r must be positive.'\n assert np.abs(q) + r < 1, 'the region must be inside the unit circle.'\n \n tol = 1e-20\n n = A.shape[0]\n \n L = cvx.Variable((B2.shape[1], n))\n P = cvx.Variable((n, n))\n gamma2 = cvx.Variable()\n \n LMI1 = cvx.bmat([\n [P, A*P + B2*L, B1, np.zeros((B1.shape[0], D11.shape[0]))],\n [P*A.T + L.T * B2.T, P, np.zeros((P.shape[0], B1.shape[1])), P*C1.T + L.T*D12.T],\n [B1.T, np.zeros((B1.shape[1], P.shape[1])), np.eye(B1.shape[1]), D11.T],\n 
[np.zeros((C1.shape[0], B1.shape[0])), C1*P + D12*L, D11, gamma2*np.eye(D11.shape[0])]\n ])\n \n cons1 = LMI1 >> tol\n \n LMI2 = cvx.bmat([\n [-r*P, -q*P + A*P + B2*L],\n [-q*P + P*A.T + L.T*B2.T, -r*P]\n ])\n \n cons2 = LMI2 << -tol\n \n cons3 = gamma2 >= tol\n \n cons4 = P == P.T\n \n cons5 = P >> tol\n \n prob = cvx.Problem(cvx.Minimize(gamma2), constraints=[cons1, cons2, cons3, cons4, cons5])\n prob.solve(solver=solver)\n \n status = prob.status\n if not status in [cvx.OPTIMAL_INACCURATE, cvx.OPTIMAL]:\n #variable.value will be None, better trow an exception\n raise OptException(f'Problem is {status}')\n \n Hinf_norm = np.sqrt(gamma2.value)\n Pop = P.value\n Lop = L.value\n \n K = Lop.dot(np.linalg.inv(Pop))\n \n return K, Hinf_norm, Pop, status", "def kspace_cholesky_solve(self, other, complx = False, n_points = None):\n if n_points is None:\n \n m1n = np.max(np.abs(self.coords), axis = 0)\n m2n = np.max(np.abs(other.coords), axis = 0)\n \n n_points = np.max([m1n,m2n], axis = 0) \n\n nx,ny,nz = 2*n_points + 1\n\n #print(\"kspace_svd_solve: \", nx,ny,nz)\n m1x,m1y = self.blocks.shape[1], self.blocks.shape[2]\n m2x,m2y = other.blocks.shape[1], other.blocks.shape[2]\n \n coords = np.roll(lattice_coords(n_points).reshape(nx,ny,nz, 3), -n_points, axis = (0,1,2)).reshape(nx*ny*nz, 3)\n\n #print(coords)\n\n \n m1r = self.cget(coords).reshape(nx,ny,nz,m1x,m1y)\n m2r = other.cget(coords).reshape(nx,ny,nz,m2x,m2y)\n M1 = np.fft.fftn(m1r, axes = (0,1,2))\n M2 = np.fft.fftn(m2r, axes = (0,1,2))\n M3 = np.zeros((nx,ny,nz,m1x, m2y),dtype = np.complex128)\n\n for c in coords:\n \n Mk = np.linalg.cholesky(M1[c[0], c[1], c[2]])\n yk = np.linalg.solve(Mk, M2[c[0], c[1], c[2]])\n\n\n\n \n\n M3[c[0], c[1], c[2]] = np.linalg.solve(Mk.conj().T, yk)\n\n\n\n \n\n ret = tmat()\n if complx:\n ret.load_nparray(np.fft.ifftn(M3.reshape(nx,ny,nz,m1x,m2y), axes = (0,1,2)).reshape(coords.shape[0], m1x,m2y), coords)\n else:\n ret.load_nparray(np.fft.ifftn(M3.reshape(nx,ny,nz,m1x,m2y), axes = (0,1,2)).real.reshape(coords.shape[0], m1x,m2y), coords)\n return ret", "def prog(args):\r\n i_fname, o_fname, pedestal_params, split_list, Num_W = args\r\n mesh = stl.mesh.Mesh.from_file(i_fname)\r\n #rotate mesh since by default the rotation axis is along X\r\n mesh.rotate([0,1,0],np.pi/2)\r\n\r\n v_arr = np.round(np.vstack(mesh.vectors).astype(float), decimals=1)\r\n\r\n splt0_arr = np.array(split_list)\r\n splt1_arr = np.roll(splt0_arr,-1)\r\n\r\n pos = cf.cartesian2cylyndrical(v_arr, Num_W)\r\n\r\n #make splits\r\n pos_list=[]\r\n for splt0, splt1 in zip(splt0_arr[:-1], splt1_arr[:-1]):\r\n pos_idx = np.where((splt0<=pos[:,:,2]) & (splt1>pos[:,:,2]))[0]\r\n print(splt0, splt1)\r\n #pos = [r, th, z] sectionwise\r\n pos_list.append(pos[pos_idx])\r\n #add pedestal mesh\r\n\r\n for sect_num, pos in enumerate(pos_list):\r\n pos = cf.add_pedestal(pos, pedestal_params)\r\n profiles=np.zeros_like(pos)\r\n\r\n for i in np.arange(np.shape(pos)[0]):\r\n profiles[i] = cf.cylyndrical2cartesian(pos[i])\r\n\r\n strokes = np.flipud(np.rot90(profiles))\r\n #transform data from longeron nodes [xyz] to:\r\n #a_arr - rotation angle around the rotation axis\r\n #r_arr - length of a segment perpenticular to the rotation axis and corresponding lateral mesh edge\r\n #z_arr - corresponding z coordiantes\r\n #v_arr - direction vector of the coresponding lateral mesh edge\r\n a_arr, r_arr, z_arr, v_arr = cf.transform(strokes, add_pedestal_bottom=True,add_pedestal_top=True)\r\n\r\n #make a summary plots\r\n cf.plot_loft_paths(profiles)\r\n 
cf.plot_loft_paths(pos)\r\n cf.plot_surf(a_arr,z_arr,r_arr)\r\n\r\n #collect data to the dictionary longeron wise\r\n res_dict = {'a_arr':np.rot90(a_arr, k=-1),\r\n 'r_arr':np.rot90(r_arr, k=-1),\r\n 'z_arr':np.rot90(z_arr, k=-1),\r\n 'v_arr':np.rot90(v_arr, k=-1)}\r\n\r\n #save result dictionary\r\n if not o_fname:\r\n o_fname = i_fname\r\n\r\n fname='{}_{}.pickle'.format(o_fname, sect_num)\r\n with open(fname, 'wb') as f:\r\n # Pickle the 'data' dictionary using the highest protocol available.\r\n pickle.dump(res_dict, f, pickle.HIGHEST_PROTOCOL)\r\n\r\n print(fname, ' saved')", "def F_trans(self):\n rho_H1 = self.edp_par['rho_H1'].value\n Z_H1 = self.edp_par['Z_H1'].value\n sigma_H1 = self.edp_par['sigma_H1'].value\n rho_M = self.edp_par['rho_M'].value\n sigma_M = self.edp_par['sigma_M'].value\n psi = self.edp_par['psi'].value \n common_scale = self.edp_par['common_scale'].value\n \n # Calculate the intermediate variables\n alpha = self.qz*cos(psi) - self.qx*sin(psi)\n Z_CH2 = Z_H1 - sigma_H1\n Z_W = Z_H1 + sigma_H1\n DeltaZ_H = Z_W - Z_CH2\n \n # Calculate the Gaussian part \n FG = -rho_M*sigma_M * exp(-0.5*(alpha*sigma_M)**2)\n FG += 2*rho_H1*sigma_H1 * cos(alpha*Z_H1) * exp(-0.5*(alpha*sigma_H1)**2)\n FG *= np.sqrt(2*pi)\n \n # Calculate the strip part\n FS = -2 * sin(alpha*Z_CH2) / alpha\n \n # Calculate the bridging part\n FB = 1 / (alpha + pi/DeltaZ_H)\n FB += 1 / (alpha - pi/DeltaZ_H)\n FB *= sin(alpha*Z_W) + sin(alpha*Z_CH2)\n FB *= 0.5\n FB -= (sin(alpha*Z_W)-sin(alpha*Z_CH2)) / alpha\n \n return common_scale * (FG + FS + FB)", "def standardBlock(solver):\n #Create and fill shared array\n createCPUSharedArray(solver,numpy.zeros(solver.sharedShape,dtype=solver.dtype).nbytes)\n for i in range(solver.intermediate):\n solver.sharedArray[i,:,solver.operating:-solver.operating,solver.operating:-solver.operating] = solver.initialConditions[solver.globalBlock]\n solver.sharedArray[i,:,solver.operating:-solver.operating,:solver.operating] = solver.initialConditions[solver.globalBlock[0],solver.globalBlock[1],-solver.operating-1:-1]\n solver.sharedArray[i,:,solver.operating:-solver.operating,-solver.operating:] = solver.initialConditions[solver.globalBlock[0],solver.globalBlock[1],1:solver.operating+1]\n #Create phase objects\n solver.standard = geometry.Geometry() \n solver.standard.setAdjustment(solver.operating)\n #Setting up GPU\n if solver.gpuBool:\n # Creating cuda device and context\n cuda.init()\n cuda_device = cuda.Device(solver.gpuRank)\n solver.cuda_context = cuda_device.make_context()\n setupGPUStandard(solver)\n #Setup CPU\n setupCPUStandard(solver)\n solver.comm.Barrier() #Ensure all processes are", "def ridge_joint(trn_fs,trn_data,alphas,val_fs=None,val_data=None,square_alphas=False,chunk_sz=5000,is_verbose=True):\n\n n_resp, n_voxels = trn_data.shape\n n_channels = np.sum([tfs.shape[1] for tfs in trn_fs])\n n_chunks = np.ceil(n_voxels/np.float(chunk_sz)).astype(np.int32)\n\n if square_alphas:\n alphas = [a**2 for a in alphas]\n\n num_train_points = float(list(trn_data.shape)[0])\n num_val_points = float(list(val_data.shape)[0])\n\n n_abc = [np.minimum(*t_fs.shape) for t_fs in trn_fs]\n ######################################################################################\n ### --- First up: compute modified covariance matrix & scaled/rotated stimulus --- ###\n ######################################################################################\n # Perform SVD on training sets for all three models \n if is_verbose:\n print(\"computing SVD\")\n U_trn,W_trn,Vt_trn = 
[],[],[]\n for t_fs in trn_fs:\n uu,ww,vv = np.linalg.svd(trn_stim_A, full_matrices=False)\n U_trn.append(uu)\n W_trn.append(ww)\n Vt_trn.append(vv)\n\n # The square of Ws (the singular values from the SVD) are the eigenvalues of the covariance matrix but have not been divided by n-1.\n L = [ww**2/float(num_train_points-1) for ww in W_trn]\n\n ### --- IDK WTF. Ask Wendino. --- ###\n ## to change: make sure that Ws are in the right units (divided by n-1) when bootstrapping, so that alphas are already in correct units\n ## at that point you can change the lines below and not divide alpha by (n-1)\n # TO DO: make this more than one line for clarity.\n w_alpha_trn = [np.diag(np.sqrt(1./(LL + aa)/(num_train_points-1))) for LL,aa in zip(L,alphas)]\n #w1_alpha_trn = sqrt(1./(L1+ alphas_A2[0]/(num_train_points-1))) \n #w1_alpha_trn = diag(w1_alpha_trn) #%turn it from an array to a matrix\n\n # Create & combine rotated & scaled stimulus space \n X_prime_trn_t = [ww.dot(vv).dot(t_fs) for ww,vv,t_fs in zip(W_trn,Vt_trn,trn_fs)]\n #S1_prime_trn_t = np.dot(np.dot(w1_alpha_trn, Vt1_trn), trn_stim_A.T) #w1_alpha_trn = 1200x1200, Vt1_trn = 1200x1200, trn_stim_A.T = 1200x3737\n Xcomb_prime_trn_t = np.vstack(X_prime_trn_t)\n\n # Create & modify covariance matrix\n stim_cov_mat_r = X_prime_trn_t.dot(X_prime_trn_t.T) / float(num_train_points-1)\n cov_diag = np.sqrt(np.diag(stim_cov_mat_r))\n full_mat_cov_diag = np.tile(cov_diag, [cov_diag.shape[0], 1])\n # re-do w/ simpler syntax?\n all_divisor = np.multiply(full_mat_cov_diag.T, full_mat_cov_diag) \n corr_mat_r = np.divide(stim_cov_mat_r, all_divisor)\n\n ### --- Clean up the correlation matrix to have zeros where we know they exist and use that data to set a threshold --- ###\n idx_ct = np.cumsum([0]+n_abc)\n idxs = [(a,b) for a,b in zip(idx_ct[:-1],idx_ct[1:])]\n # Block diagonal components of covariance matrix\n for n,(ii,jj) in zip(n_abc,idxs):\n corr_mat_r[ii:jj] = np.eye(n)\n # Off-diagonal elements: ignore for now? 
\n #for i1,i2 in zip(idxs[:-1],idxs[1:]):\n # (ii,jj),(kk,ll) = i1,i2\n \n\n # ##### --- WORKING HERE - SEE IPYTHON NOTEBOOK --- #########\n # upper_right_corr = np.ravel(corr_mat_r[0:nA, nA:])\n # middle_right_corr = np.ravel(corr_mat_r[nA:(nA+nB),(nA+nB):])\n # right_corr = np.hstack([upper_right_corr, middle_right_corr])\n # s_right_corr = argsort(right_corr)\n # # WTF is this?\n # #corr_cutoff = 954 # WH magic number; something to do with the fact that it's needless to have \n # # ALL the block-diagonal diagonals, since we have limited data\n # #goodcorrs_idx = np.hstack([s_right_corr[0:corr_cutoff], s_right_corr[-1:-(corr_cutoff+1):-1]])\n\n # new_right_corrs = np.squeeze(np.zeros([s_right_corr.shape[0],1]))\n # #new_right_corrs[goodcorrs_idx] = right_corr[goodcorrs_idx]\n # new_upper_right_corrs = np.reshape(new_right_corrs[0:(nB+nC)*nA],[nA,nB+nC])\n # new_lower_left_corrs = new_upper_right_corrs.T\n # new_middle_right_corrs = np.reshape(new_right_corrs[(nB+nC)*nA:],[nB,nC])\n # new_middle_left_corrs = new_middle_right_corrs.T\n\n # ##NEED TO CHANGE THIS: REMOVE HARDCODED MATRIX SIZES\n # new_corr_mat_r = copy.copy(corr_mat_r)\n # new_corr_mat_r[0:nA, nA:]= new_upper_right_corrs\n # new_corr_mat_r[nA:(nA+nB), (nA+nB):] = new_middle_right_corrs\n # new_corr_mat_r[(nA+nB):, nA:(nA+nB)]= new_middle_left_corrs # More like bottom middle\n # new_corr_mat_r[nA:,0:nA] = new_lower_left_corrs # \n # new_corr_mat_r[0:nA,0:nA]= np.identity(nA)\n # new_corr_mat_r[nA:(nA+nB), nA:(nA+nB)] = np.identity(nB)\n # new_corr_mat_r[(nA+nB):,(nA+nB):] = np.identity(nC)\n\n #perform eigenvalue decomposition (WHAT FOR? delete this?)\n #w, v = np.linalg.eigh(new_corr_mat_r)\n # Invert modified covariance matrix\n #corr_r_inv = np.linalg.inv(new_corr_mat_r)\n corr_r_inv = np.linalg.inv(corr_mat_r)\n #for \n ##create filter\n dot1 = np.dot(X_prime_trn_t, trn_data) #precompute for speed\n dot2 = np.dot(corr_r_inv, dot1) #precompute for speed\n # Weights\n h_123_prime = np.divide(dot2, (float(num_train_points-1)))\n\n\n ##create estimated responses from training data\n #r_hat = np.dot(X_prime_trn_t.T, h_123_prime) # not usually done...\n #if do_pred:\n #validation set results\n val_stim_A_prime = np.dot(np.dot(w1_alpha_r, Vt1_r), val_stim_A.T)\n val_stim_B_prime = np.dot(np.dot(w2_alpha_r, Vh2_r), val_stim_B.T)\n val_stim_C_prime = np.dot(np.dot(w3_alpha_r, Vh3_r), val_stim_C.T)\n #S1_prime = S1_prime[0:200,:]\n #S2_prime = S2_prime[0:200,:]\n\n S123_val_prime_t = np.vstack([val_stim_A_prime, val_stim_B_prime, val_stim_C_prime])\n\n #create validation set correlations\n r_hat_val = np.dot(S123_val_prime_t.T, h_123_prime)\n\n\n #look at performance\n valcorr = _sutils.column_corr(r_hat_val, val_data)\n out = dict(\n #weights=wt,\n #alphas=alphas,\n #n_sig_vox_byalpha=n_sig_vox_byalpha,\n cc=valcorr\n )\n return out", "def compute_controller(self):\n # region Input Info\n\n # here we implement an example for a consensus algorithm\n neig = self.get_neighbors()\n messages = self.get_messages()\n pos, rot = self.get_pos_and_orientation()\n \n #send message of positions to all neighbors indicating our position\n for n in neig:\n self.send_message(n, pos)\n \n # check if we received the position of our neighbors and compute desired change in position\n # as a function of the neighbors (message is composed of [neighbors id, position])\n dx = 0.\n dy = 0.\n # print(messages)\n # endregion\n if messages:\n # similar to laplacian but for each robot\n # for m in messages:\n # dx += m[1][0] - pos[0]\n # dy += m[1][1] - 
pos[1]\n\n # position of All robots\n Apos = np.zeros([6,2])\n Apos[self.id,:]=pos[0:2]\n for m in messages:\n Apos[m[0],:]=m[1][0:2]\n\n TarM = np.zeros([6,2])\n TarM[self.id, :] = self.TargetP[self.Tid,:]-pos[0:2]\n Cdiff = Apos-pos[0:2]\n Cdiff = np.sqrt(np.square(Cdiff[:,0])+np.square(Cdiff[:,1]))\n Cdiff = np.sum(Cdiff)\n Ddiff = self.P_Des-self.P_Des[self.id]\n Ddiff = np.sqrt(np.square(Ddiff[:, 0]) + np.square(Ddiff[:, 1]))\n Ddiff = np.sum(Ddiff)\n Tdiff = np.abs(Ddiff - Cdiff)\n\n\n\n\n # region Obstacles\n Obc = Apos\n # Obc = self.Obstacles\n # Obc = np.vstack([Obs,pos[0:2]])\n Diff = pos[0:2] - Obc\n for m in range(0, Diff.shape[0]):\n if (np.sqrt(np.square(Diff[m, 0]) + np.square(Diff[m, 1]))) > 0.35:\n Diff[m, :] = np.array([0, 0])\n\n DiffY = Diff[:, 1].reshape([1, -1])\n DiffX = Diff[:, 0].reshape([1, -1])\n x_odot = np.sum(np.exp(-np.square(DiffX) / self.Var) * DiffX)\n y_odot = np.sum(np.exp(-np.square(DiffY) / self.Var) * DiffY)\n\n ObsAv = np.array([x_odot, y_odot])\n # endregion\n\n\n NewGd = np.square(np.transpose(self.E) @ Apos)\n NewGd = (NewGd[:, 0] + NewGd[:, 1]).reshape([-1, 1])\n G = self.Gdsq - NewGd\n Rg = self.DistJacobian(Apos, self.Edges)\n p_ddot = np.zeros(([6, 2]))\n\n if (Tdiff < self.Thr):\n self.StartTimer = True\n\n\n if(self.StartTimer):\n self.Timer += 1\n if (self.Timer > 500+self.OffTimer):\n self.FormStable = True\n self.StartTimer = False\n self.Timer = 0\n\n if(self.Tid > 3 and np.sum(TarM[self.id, 0])<5):\n TarM[self.id, 0] = 5\n if (self.Tid > 3 and np.sum(TarM[self.id, 1]) < 5):\n TarM[self.id, 1] = 5\n if (self.Tid > 3 and np.sum(TarM[self.id, 1]) > -5):\n TarM[self.id, 1] = -5\n if (self.Tid > 3 and np.sum(TarM[self.id, 1]) > -5):\n TarM[self.id, 1] = -5\n\n if (self.Tid > 3 and np.sum(TarM[self.id, :]) < 0.01):\n self.Tid +=1\n\n if (self.FormStable):\n # Formation Done\n if self.Tid == 0 and self.Formation == \"square\":\n self.P_Des = self.Form_HRec(0.5)\n self.Reset_Form()\n # self.Tid += 1\n # self.FormStable = False\n print(self.P_Des, self.Formation, \" \", self.Tid)\n # self.K1 = 5\n # self.K2 = 50\n if (self.Tid < self.TargetP.shape[0]-1 and self.FormStable):\n self.Tid += 1\n if(self.Tid == 1):\n self.K1 = 2\n self.K3 = 10\n self.Thr = 0.001\n if (self.Tid == 2):\n self.K1 = 20\n self.K3 = 1\n self.P_Des = self.Form_HRec(0.5)\n self.Reset_Form()\n self.FormStable = False\n # Linear Control Law\n p_dot = np.zeros([6,2])\n p_dot = -self.K1 * np.matmul(self.L, Apos) + self.K1 * np.matmul(self.E, self.Z_Des)\n p_dot += self.dt * (self.OK / self.Var) * ObsAv\n # p_dot += self.K3 * TarM\n # Non - linear Control Law\n # p_ddot = self.K2 * (np.transpose(Rg) @ G).reshape([-1, 2])\n # p_dot += p_ddot*self.dt\n if(self.id == 0):\n # print(Tdiff,self.TargetP[self.Tid,:],np.sum(G),self.Tid,self.Timer)\n p_dot = self.K3 * TarM\n if (self.id == 0):\n print(Tdiff,self.TargetP[self.Tid,:],np.sum(G),self.Tid,self.Timer)\n # if(self.Tid == 1):\n # p_dot += -self.K1 * np.matmul(self.L, Apos) + self.K1 * np.matmul(self.E, self.Z_Des)\n\n dx = p_dot[self.id, 0]\n dy = p_dot[self.id, 1]\n\n # Non - linear Control\n # p_ddot = self.K2 * (np.transpose(Rg) @ G).reshape([-1, 2])\n # p_ddot += (self.OK / self.Var) * ObsAv\n # dx = self.dt * p_ddot[self.id, 0]\n # dy = self.dt * p_ddot[self.id, 1]\n #else:\n # TarM[self.id, :] = Tdiff\n # # Linear Control\n # p_dot = -self.K1 * np.matmul(self.L, Apos) + self.K1 * np.matmul(self.E, self.Z_Des)\n # p_dot += self.dt * (self.OK / self.Var) * ObsAv\n # p_dot += self.K3 * TarM\n # dx = p_dot[self.id, 0]\n # dy = 
p_dot[self.id, 1]\n\n # Non - linear Control\n # p_ddot = self.K2 * (np.transpose(Rg) @ G).reshape([-1, 2])\n # p_ddot += self.K3 * TarM\n # p_ddot += (self.OK / self.Var) * ObsAv\n # dx = self.dt * p_ddot[self.id, 0]\n # dy = self.dt * p_ddot[self.id, 1]\n\n # region Robot Wheel Control\n # integrate\n des_pos_x = pos[0] + self.dt * dx\n des_pos_y = pos[1] + self.dt * dy\n\n #compute velocity change for the wheels\n vel_norm = np.linalg.norm([dx, dy]) #norm of desired velocity\n if vel_norm < 0.01:\n vel_norm = 0.01\n des_theta = np.arctan2(dy/vel_norm, dx/vel_norm)\n right_wheel = np.sin(des_theta-rot)*vel_norm + np.cos(des_theta-rot)*vel_norm\n left_wheel = -np.sin(des_theta-rot)*vel_norm + np.cos(des_theta-rot)*vel_norm\n self.set_wheel_velocity([left_wheel, right_wheel])\n # endregion", "def origami_H2_2cyl(w1,h1,t1,w2,h2,t2):\n assert((w2 < w1) and (t1 < w1) and (t2 < w2))\n\n # v for volumes and z for z\n v1 = h1*w1\n v2 = h2*w2\n z1 = (h1-1)*w1 + 1\n z2 = v1 + (h2-1)*w2 + 1\n\n # the horizontal permutation\n x = [None] + range(2,v1+v2+1) + [1]\n for i in range(h1):\n x[(i+1)*w1] = i*w1 + 1\n for i in range(h2):\n x[v1 + (i+1)*w2] = v1 + i*w2 + 1\n\n # the vertical permutation\n y = ([None] +\n range(w1+1,v1+1) + [None]*w1 +\n range(v1+w2+1,v1+v2+1) + [None]*w2)\n\n for i in range(w2):\n # up-left of the first cylinder\n # print \"U1L) z1 + (t1+i)%w1 -> 1+v1+i: \", z1+(t1+i)%w1, 1+v1+i\n y[z1+(t1+i)%w1] = 1+v1+i\n for i in range(w2):\n # up of the second cylinder\n # print \"U2) z2+(t2+i)%w2 -> 1 + (t1+i)%w1: \", z2+(t2+i)%w2, 1+(t1+i)%w1\n y[z2+(t2+i)%w2] = 1+i\n for i in range(w1-w2):\n # up-right of the first cylinder\n # print \"U1R) z1+w2+(t1+i) -> 1+i: \", z1+(w2+t1+i)%w1, 1+w2+i\n y[z1+(w2+t1+i)%w1] = 1+w2+i\n\n return Origami(x[1:],y[1:])", "def connect_forward_and_back(simulation_dict, (index0, blocks_per_dim0, predicted_array), (index1, blocks_per_dim1), square_size, radius, context_factor):\n hidden_size = simulation_dict['hidden_size']\n dx = hidden_size\n dy = hidden_size\n logging.info(\"Connecting from index %d to index %d\" % (index0, index1))\n logging.info(\"Input layer size is %d, receiving layer size is %d\" % (blocks_per_dim0, blocks_per_dim1))\n logging.info(\"Radius of connectivity %d\" % radius)\n for x in range(blocks_per_dim1):\n for y in range(blocks_per_dim1):\n surround = get_fan_in((x, y),\n dim_x_l=blocks_per_dim0,\n dim_y_l=blocks_per_dim0,\n dim_x_u=blocks_per_dim1,\n dim_y_u=blocks_per_dim1,\n block_x=square_size,\n block_y=square_size,\n radius=radius)\n dest = index1 + x * (blocks_per_dim1) + y # destination unit\n for xy in surround:\n source = index0 + xy[0] * blocks_per_dim0 + xy[1] # source unit\n # Prepare the input and corresponding delta block at source\n input_block = simulation_dict['stage0'][source]['output_block']\n delta_block = SharedArray.SharedNumpyArray_like(input_block)\n simulation_dict['stage0'][source]['delta_blocks'].append(delta_block)\n # Prepare the context and corresonding delta block at destination\n context_block = simulation_dict['stage0'][dest]['output_block']\n delta_block2 = SharedArray.SharedNumpyArray_like(context_block)\n simulation_dict['stage0'][dest]['delta_blocks'].append(delta_block2)\n # Connect the context block to the source\n simulation_dict['stage0'][source]['context_blocks'].append((context_block, delta_block2, context_factor))\n # Prepare the predicted blocks\n xx = xy[0]*hidden_size\n yy = xy[1]*hidden_size\n assert(predicted_array[xx:xx+dx, yy:yy+dy].shape == context_block.shape)\n predicted_block = 
SharedArray.DynamicView(predicted_array)[xx:xx+dx, yy:yy+dy]\n if not (predicted_block.shape == (dx, dy)):\n print predicted_block.shape\n raise\n # Connect the input to the destination together with its predicted blocks and so on.\n past_block = SharedArray.SharedNumpyArray_like(input_block)\n derivative_block = SharedArray.SharedNumpyArray_like(input_block)\n integral_block = SharedArray.SharedNumpyArray_like(input_block)\n pred_block_local = SharedArray.SharedNumpyArray_like(input_block)\n simulation_dict['stage0'][dest]['signal_blocks'].append((input_block, delta_block, predicted_block, past_block, derivative_block, integral_block, pred_block_local))", "def pro_avfid_superoperator_compsubspace_phasecorrected(U,L1,phases):\n\n Ucorrection = qtp.Qobj([[np.exp(-1j*np.deg2rad(phases[0])), 0, 0, 0, 0, 0, 0, 0, 0],\n [0, np.exp(-1j*np.deg2rad(phases[1])), 0, 0, 0, 0, 0, 0, 0],\n [0, 0, np.exp(-1j*np.deg2rad(phases[0])), 0, 0, 0, 0, 0, 0],\n [0, 0, 0, np.exp(-1j*np.deg2rad(phases[2])), 0, 0, 0, 0, 0],\n [0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[3]-phases[-1])), 0, 0, 0, 0],\n [0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[2])), 0, 0, 0],\n [0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[0])), 0, 0],\n [0, 0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[1])), 0],\n [0, 0, 0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[0]))]],\n type='oper',\n dims=[[3, 3], [3, 3]])\n\n if U.type=='oper':\n U=Ucorrection*U\n inner = U.dag()*U_target\n part_idx = [0, 1, 3, 4] # only computational subspace\n ptrace = 0\n for i in part_idx:\n ptrace += inner[i, i]\n dim = 4 # 2 qubits comp subspace \n\n return np.real(((np.abs(ptrace))**2+dim*(1-L1))/(dim*(dim+1)))\n\n elif U.type=='super':\n U=qtp.to_super(Ucorrection)*U\n kraus_form = qtp.to_kraus(U)\n dim=4 # 2 qubits in the computational subspace\n part_idx = [0, 1, 3, 4] # only computational subspace\n psum=0\n for A_k in kraus_form:\n ptrace = 0\n inner = U_target_diffdims.dag()*A_k # otherwise dimension mismatch\n for i in part_idx:\n ptrace += inner[i, i]\n psum += (np.abs(ptrace))**2\n\n return np.real((dim*(1-L1) + psum) / (dim*(dim + 1)))", "def make_kinematics():\r\n # Read data values for vel, sigma, h3, h4\r\n data = np.loadtxt(outtable, usecols=(5, 7, 9, 11)).T\r\n xall, yall, sn = np.loadtxt(outtable, usecols=(1, 2, 14,)).T\r\n ###########################################################################\r\n # Details of the maps\r\n names = [r\"vel\", r\"sigma\", r\"h3\", r\"h4\"]\r\n cb_label = [r\"V$_{\\rm LOS}$ (km/s)\", r\"$\\sigma_{\\rm LOS}$ (km/s)\",\r\n r\"$h_3$\", r\"$h_4$\"]\r\n # lims = [[3750,4000], [150,500], [-0.08, 0.08], [-0.15, 0.15] ]\r\n lims = [[3640, 4040], [220, 500], [-0.08, 0.08], [-0.11, 0.11]]\r\n xcb = [0.068, 0.385, 0.705]\r\n ###########################################################################\r\n # Set the threshold S/N for smoothing\r\n # Higher values than this values are not smoothed\r\n sn_thres = [50, 50, 1000, 1000]\r\n ###########################################################################\r\n # Read values of other authors\r\n tab1a, tab1b = get_richtler()\r\n tab2 = get_ventimiglia()\r\n ###########################################################################\r\n # Set the colormap\r\n cmap = \"Spectral_r\"\r\n ###########################################################################\r\n # Loop for figures\r\n for i, vector in enumerate(data):\r\n print \"Producing figure for {0}...\".format(names[i])\r\n good = np.where(((~np.isnan(vector)) & (sn > sn_cut)))[0]\r\n sn_high = 
np.where(((~np.isnan(vector)) & (sn >= sn_thres[i])))[0]\r\n sn_low = np.delete(good, sn_high)\r\n vector_low = ll.loess_2d(xall[sn_low], yall[sn_low], vector[sn_low],\r\n frac=frac_loess)\r\n vector_high = vector[sn_high]\r\n good = np.hstack((sn_high, sn_low))\r\n v_loess = np.hstack((vector_high, vector_low))\r\n v = vector[good]\r\n vmin = lims[i][0] if lims[i][0] else v_loess.min()\r\n vmax = lims[i][1] if lims[i][1] else v_loess.max()\r\n fig = plt.figure(figsize=(15, 5.1))\r\n gs = gridspec.GridSpec(1, 3)\r\n gs.update(left=0.051, right=0.985, bottom=0.11, top=0.975, hspace=0.06,\r\n wspace=0.06)\r\n vs = [v, v_loess, v_loess]\r\n ylabels = [1, 0, 0]\r\n contours = [\"vband\", \"vband\", \"residual\"]\r\n cb_fmts = [\"%i\", \"%i\", \"%.2f\", \"%.2f\"]\r\n ####################################################\r\n # Produces pannels\r\n ####################################################\r\n for j in range(3):\r\n ax = plt.subplot(gs[j])\r\n # if i <1:\r\n # norm = LogNorm(vmin=vmin, vmax=vmax)\r\n # else:\r\n # norm = Normalize(vmin=vmin, vmax=vmax)\r\n norm = Normalize(vmin=vmin, vmax=vmax)\r\n coll = PolyCollection(polygons_bins[good], array=vs[j],\r\n cmap=cmap, edgecolors='w', norm=norm,\r\n linewidths=0.4)\r\n draw_map(fig, ax, coll)\r\n draw_contours(contours[j], fig, ax)\r\n plt.gca().add_patch(\r\n Rectangle((18, -36), 20, 10, alpha=1, zorder=10000,\r\n color=\"w\"))\r\n draw_colorbar(fig, ax, coll, cblabel=cb_label[i],\r\n cbar_pos=[xcb[j], 0.18, 0.08, 0.04],\r\n ticks=np.linspace(vmin, vmax, 4), cb_fmt=cb_fmts[i])\r\n xylabels(ax, y=ylabels[j])\r\n if j > 0:\r\n ax.set_yticklabels([])\r\n #####################################################\r\n # Draw long slits of other papers\r\n #####################################################\r\n if i > 1:\r\n continue\r\n bc = [\"g\", \"g\", \"b\", \"b\"]\r\n for k, tab in enumerate([tab1a, tab1b, tab2[4:], tab2[:4]]):\r\n norm = Normalize(vmin=vmin, vmax=vmax)\r\n idx = np.argsort(tab[:, 0])\r\n points = np.array([tab[:, 0][idx], tab[:, 1][idx]]).T.reshape(\r\n -1, 1, 2)\r\n segments = np.concatenate([points[:-1], points[1:]],\r\n axis=1)\r\n lc = LineCollection(segments, array=tab[:, i + 2],\r\n cmap=cmap, norm=norm, lw=5)\r\n ax.add_collection(lc)\r\n add_borders(ax, points, c=bc[k])\r\n # plt.savefig(\"figs/{0}.pdf\".format(names[i]))\r\n plt.savefig(\"figs/{0}.png\".format(names[i]))\r\n # plt.savefig(\"figs/{0}.eps\".format(names[i]), fmt=\"eps\")\r", "def solveForModeB1(X, M, n, maxInner, epsilon, tol,sita,Y1, lambta2):\n # Pi(n) = [A(N) kr A(N-1) kr ... A(n+1) kr A(n-1) kr .. 
A(1)]^T\n Pi = tensorTools.calculatePi(X, M, n)\n #print 'Pi size', Pi.shape\n #print 'pi='+str(Pi)\n #print(M.U[n])\n for iter in range(maxInner):\n # Phi = (X(n) elem-div (B Pi)) Pi^T\n #print X.vals.shape,X.shape\n #print X.vals.flatten().shape\n Phi = tensorTools.calculatePhi(X, M.U[n], Pi, n, epsilon=epsilon)\n #print('phi'+str(Phi))\n #print(Phi)\n # check for convergence that min(B(n), E - Phi(n)) = 0 [or close]\n kktModeViolation = np.max(np.abs(np.minimum(M.U[n], 1-Phi).flatten()))\n if (kktModeViolation < tol):\n break\n\n B=M.U[n]\n #print B.shape\n colNorm = np.apply_along_axis(np.linalg.norm, 0, B, 1)\n zeroNorm = np.where(colNorm == 0)[0]\n colNorm[zeroNorm] = 1\n B = B / colNorm[np.newaxis, :]\n tm=np.hstack((np.ones((B.shape[0],1)),B))\n Y1=Y1.reshape((Y1.shape[0],1))\n\n derive=-1.0*lambta2/B.shape[0]*np.dot((Y1-np.dot(tm,sita)),sita.T)\n #print derive.shape\n #print np.multiply(M.U[n],derive[:,1:]).shape\n #print np.multiply(M.U[n],Phi).shape\n M.U[n] = np.array(np.multiply(M.U[n],Phi))-np.array((np.multiply(M.U[n],derive[:,1:])))\n\n #print 'after'\n #print M.U[n][0]\n #print(\" Mode={0}, Inner Iter={1}, KKT violation={2}\".format(n, iter, kktModeViolation))\n return M, Phi, iter, kktModeViolation", "def initLChain(self):\n if self.lChain is None:\n self.lChain = {} # maps edge -> (r, L) where 0 <= edge <= edgeLen+1\n self.lChain[0] = (0, 0)\n self.lChain[(self.edgeLen+1)] = (self.rMax, self.maxL)\n for edge in range(1, self.edgeLen+1):\n r = 0.5 * edge\n l = int(math.ceil((edge * self.maxL)/ float(self.edgeLen + 1)))\n self.lChain[edge] = (r, l)", "def compute_matrices_A_B(self, state, action, env):\n Fe, Fs, psi = action\n theta = state[THETA]\n m = env.lander.mass\n J = env.lander.inertia\n\n sin_psi = math.sin(psi)\n cos_psi = math.cos(psi)\n sin_theta = math.sin(theta)\n cos_theta = math.cos(theta)\n\n cos_t_cos_p = cos_theta * cos_psi\n sin_t_cos_p = sin_theta * cos_psi\n sin_t_sin_p = sin_theta * sin_psi\n sin_t_cos_t = sin_theta * cos_theta\n cos_t_sin_p = cos_theta * sin_psi\n\n a_25 = (Fe * (cos_t_cos_p - sin_psi * sin_theta) - Fs * sin_theta) / m\n a_45 = (Fe * (sin_t_cos_t - cos_t_sin_p) - Fs * cos_theta) / m\n\n A = [[0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, a_25, 0],\n [0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, a_45, 0],\n [0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0]]\n\n b_21 = (sin_t_cos_p + cos_t_cos_p) / m\n b_22 = cos_theta / m\n b_23 = -Fe * sin_t_sin_p / m\n\n b_41 = (cos_t_cos_p - sin_t_sin_p) / m\n b_42 = -sin_theta / m\n b_43 = Fe * (-cos_t_sin_p - sin_t_cos_p) / m\n\n b_61 = -sin_psi * L1 / J\n b_62 = L2 / J\n b_63 = -Fe * cos_psi * L1 / J\n\n B = [[0, 0, 0],\n [b_21, b_22, b_23],\n [0, 0, 0],\n [b_41, b_42, b_43],\n [0, 0, 0],\n [b_61, b_62, b_63]]\n\n return np.array(A), np.array(B)", "def __init__(self, _sequence, _structure_offset,\n _invert_y, _invert_init_angle, _reverse_actuation,\n _bot_color, _top_color,\n _name,\n _r1, _r2,\n _theta1, _theta2,\n _leg_length):\n\n # Define sequence\n self.sequence = _sequence\n self.structure_offset = _structure_offset\n self.invert_y = _invert_y\n self.bot_color = _bot_color\n self.top_color = _top_color\n self.name = _name\n self.invert_init_angle = _invert_init_angle\n if _reverse_actuation:\n self.invert_init_angle = not self.invert_init_angle\n\n # Create first block\n _d_bot = np.arccos(_theta1) * _r1 / 2\n _d_top = np.arccos(_theta2) * _r2 / 2\n _d_mid = 1 / 100\n _w = 5.5 / 100\n _h = 5.5 / 100\n\n _center = Coordinate(x=0, y=_d_bot - (_h / 2))\n self.block_bot = Block(\n _width=_w,\n _height=_h,\n 
_center=_center,\n _anchor_d=_d_bot,\n _color=self.bot_color,\n _type='bottom'\n )\n\n # Create mid block\n _center = Coordinate(x=0, y=_r1 - _d_mid + (_h / 2))\n self.block_mid = Block(\n _width=_w,\n _height=_h,\n _center=_center,\n _anchor_d=_d_mid,\n _color=Utils.black,\n _type='middle'\n )\n\n # Create top block\n _center = Coordinate(x=0, y=self.block_mid.get_anchor(type=\"t\").y + _r2 - _d_top + (_h/2))\n self.block_top = Block(\n _width=_w,\n _height=_h,\n _center=_center,\n _anchor_d=_d_top,\n _color=self.top_color,\n _type='top'\n )\n\n # Create the bars_bot\n self.bars_bot = Arm(\n self.block_bot.get_anchor(type=\"t\"),\n self.block_mid.get_anchor(type=\"b\"),\n _r1,\n self.block_bot.get_anchor_distance()\n )\n\n # Create the bars_top\n self.bars_top = Arm(\n self.block_mid.get_anchor(type='t'),\n self.block_top.get_anchor(type='b'),\n _r2,\n self.block_mid.get_anchor_distance()\n )\n\n # Create the spring_bot\n self.spring_bot = Spring(\n _P=Coordinate(x=0, y=self.block_bot.get_anchor(type='t').y),\n _Q=Coordinate(x=0, y=self.block_mid.get_anchor(type='b').y)\n )\n\n # Create the spring_top\n self.spring_top = Spring(\n _P=Coordinate(x=0, y=self.block_mid.get_anchor(type='t').y),\n _Q=Coordinate(x=0, y=self.block_top.get_anchor(type='b').y)\n )\n\n # Compute Theta_s - limits of the angle for the bar.\n self.theta_s_bot = np.arccos(2 * self.block_bot.anchor_d / self.bars_bot.length)\n self.theta_s_top = np.arccos(2 * self.block_mid.anchor_d / self.bars_top.length)\n\n self.theta_i_bot = 0\n self.theta_i_top = 0\n\n self.leg_length = _leg_length\n\n self.A = []\n self.B = []\n self.C = []\n\n self.ground_distance = 0.0\n\n self.init_position()", "def step2(self):\n\t\tif self.b[self.k - 1] == 'a':\n\t\t\tif self.ends(\"ational\"): self.r(\"ate\")\n\t\t\telif self.ends(\"tional\"): self.r(\"tion\")\n\t\telif self.b[self.k - 1] == 'c':\n\t\t\tif self.ends(\"enci\"):\t self.r(\"ence\")\n\t\t\telif self.ends(\"anci\"): self.r(\"ance\")\n\t\telif self.b[self.k - 1] == 'e':\n\t\t\tif self.ends(\"izer\"):\t self.r(\"ize\")\n\t\telif self.b[self.k - 1] == 'l':\n\t\t\tif self.ends(\"bli\"):\t self.r(\"ble\") # --DEPARTURE--\n\t\t\t# To match the published algorithm, replace this phrase with\n\t\t\t#\tif self.ends(\"abli\"):\t self.r(\"able\")\n\t\t\telif self.ends(\"alli\"): self.r(\"al\")\n\t\t\telif self.ends(\"entli\"): self.r(\"ent\")\n\t\t\telif self.ends(\"eli\"):\t self.r(\"e\")\n\t\t\telif self.ends(\"ousli\"): self.r(\"ous\")\n\t\telif self.b[self.k - 1] == 'o':\n\t\t\tif self.ends(\"ization\"): self.r(\"ize\")\n\t\t\telif self.ends(\"ation\"): self.r(\"ate\")\n\t\t\telif self.ends(\"ator\"): self.r(\"ate\")\n\t\telif self.b[self.k - 1] == 's':\n\t\t\tif self.ends(\"alism\"):\t self.r(\"al\")\n\t\t\telif self.ends(\"iveness\"): self.r(\"ive\")\n\t\t\telif self.ends(\"fulness\"): self.r(\"ful\")\n\t\t\telif self.ends(\"ousness\"): self.r(\"ous\")\n\t\telif self.b[self.k - 1] == 't':\n\t\t\tif self.ends(\"aliti\"):\t self.r(\"al\")\n\t\t\telif self.ends(\"iviti\"): self.r(\"ive\")\n\t\t\telif self.ends(\"biliti\"): self.r(\"ble\")\n\t\telif self.b[self.k - 1] == 'g': # --DEPARTURE--\n\t\t\tif self.ends(\"logi\"):\t self.r(\"log\")\n\t\t# To match the published algorithm, delete this phrase", "def _init_layers(self):\n cls_branch = []\n for _ in range(self.num_reg_fcs):\n cls_branch.append(Linear(self.embed_dims, self.embed_dims))\n cls_branch.append(nn.LayerNorm(self.embed_dims))\n cls_branch.append(nn.ReLU(inplace=True))\n cls_branch.append(Linear(self.embed_dims, 
self.cls_out_channels))\n fc_cls = nn.Sequential(*cls_branch)\n\n reg_branch = []\n for _ in range(self.num_reg_fcs):\n reg_branch.append(Linear(self.embed_dims, self.embed_dims))\n reg_branch.append(nn.ReLU())\n reg_branch.append(Linear(self.embed_dims, self.code_size))\n reg_branch = nn.Sequential(*reg_branch)\n\n past_traj_reg_branch = []\n for _ in range(self.num_reg_fcs):\n past_traj_reg_branch.append(\n Linear(self.embed_dims, self.embed_dims))\n past_traj_reg_branch.append(nn.ReLU())\n past_traj_reg_branch.append(\n Linear(self.embed_dims, (self.past_steps + self.fut_steps)*2))\n past_traj_reg_branch = nn.Sequential(*past_traj_reg_branch)\n\n def _get_clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\n # last reg_branch is used to generate proposal from\n # encode feature map when as_two_stage is True.\n num_pred = (self.transformer.decoder.num_layers + 1) if \\\n self.as_two_stage else self.transformer.decoder.num_layers\n\n if self.with_box_refine:\n self.cls_branches = _get_clones(fc_cls, num_pred)\n self.reg_branches = _get_clones(reg_branch, num_pred)\n self.past_traj_reg_branches = _get_clones(\n past_traj_reg_branch, num_pred)\n else:\n self.cls_branches = nn.ModuleList(\n [fc_cls for _ in range(num_pred)])\n self.reg_branches = nn.ModuleList(\n [reg_branch for _ in range(num_pred)])\n self.past_traj_reg_branches = nn.ModuleList(\n [past_traj_reg_branch for _ in range(num_pred)])\n if not self.as_two_stage:\n self.bev_embedding = nn.Embedding(\n self.bev_h * self.bev_w, self.embed_dims)", "def solver(u_init, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = 1e-3, max_iter = 5000, verbose = 0, nnls_max_iter=30):\n\n # Raise('NotImplementedError: only adjusted the arguments.')\n #Need to incorporate L_lhs into stacked and appropriate w_lin updates, u_update and eta_lin increments\n #precompute the expensive operation:\n lin_penalties = 1/np.sqrt(2*eta_lin)\n eta_T_H_L_stacked = scipy.sparse.vstack([T.multiply(1/np.sqrt(2*eta_0))] + [H[i].multiply(1/np.sqrt(2*eta[i])) for i in range(len(H))] + [L_lhs.multiply(lin_penalties[:,None])])\n #!!!!\n# premultiplied_lhs = eta_T_H_stacked.T.dot(eta_T_H_stacked).toarray()\n #!!!!\n u_prev = u_init + 1\n u = u_init\n count = 0\n obj_history = []\n relaxed_obj_history = [-1, 0.1] #just two initial values to enter the loop\n while np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2]) > ftol and count < max_iter:#np.linalg.norm(u - u_prev, np.inf) > 1e-3 and count < max_iter: #Maybe all of them stop changing\n start = time.time()\n \n u_prev = np.copy(u)\n w_0 = w_0_update(eta_0, u, T, alpha, B) \n w = w_update(u, H, gamma, D, C) \n w_lin = w_lin_update(u, L_lhs, L_rhs)\n# u = u_update(eta_0, eta, w_0, w, eta_T_H_stacked, nnls_max_iter=50)\n #!!!!\n # u = u_update(eta_0, eta, w_0, w, eta_T_H_L_stacked, nnls_max_iter=30)\n u = u_update(eta_0, eta, eta_lin, w_0, w, w_lin, eta_T_H_L_stacked, premultiplied_lhs = None, nnls_max_iter=nnls_max_iter)\n #!!!!\n count += 1 \n if count == 10:\n u_inf = np.copy(u)\n w_0_inf = w_0[:]\n w_inf = w[:]\n w_lin_inf = w_lin[:]\n if count > 10 and np.abs(cur_obj) > 1e+15: #HANDLE THIS BETTER!!!\n print('INFINITY! 
RETURNING u at the 10-th iteration to enter the feasibility loop')\n return u_inf, w_0_inf, w_inf, w_lin_inf, obj_history, relaxed_obj_history\n \n cur_obj = obj_u_opt_N_fixed(u, T, alpha, B)\n obj_history.append(cur_obj)\n cur_relaxed_obj = relaxed_obj_u_opt_N_fixed(u, w_0, w, w_lin, eta_0, eta, eta_lin, T, H, L_lhs, alpha, B)\n # relaxed_obj_u_opt_N_fixed(u, w_0, w, eta_0, eta, T, H, alpha, B)\n relaxed_obj_history.append(cur_relaxed_obj) \n \n stop = time.time()\n duration = stop-start\n \n if count%1 == 0 and verbose: \n stopping_criterion = np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2])\n print(' iter = {}, stopping criterion:{}, OBJ {}'.format(count, stopping_criterion, cur_obj))\n print(' This iteration took: {}'.format(duration))\n return u, w_0, w, w_lin, obj_history, relaxed_obj_history", "def get_things1(kp_3d, kp_2d, des, comp_list, H, map_3d, map_2d, map_des, map_cam, map_view, my_max):\n # Initializing the arrays\n points_3d = []\n points_2d = []\n camera_ind = []\n points_ind = []\n cam_params = []\n\n dst_3d = kp_3d\n dst_2d = kp_2d\n src_3d = map_3d\n src_2d = map_2d\n src_cam = map_cam\n low_bound = []\n up_bound = []\n my_min = 0\n\n # Updating the Camera parameters in map and setting the bounds for the update \n for i in range(my_min,my_max+1):\n cam_param = [map_view[i,0], map_view[i,1], map_view[i,2], map_view[i,3], map_view[i,4], map_view[i,5], f,0,0]\n cam_params.append(cam_param)\n\n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-20)\n low_bound.append(-np.inf)\n low_bound.append(-20)\n low_bound.append(f-1)\n low_bound.append(-1)\n low_bound.append(-1)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(20)\n up_bound.append(np.inf)\n up_bound.append(20)\n up_bound.append(f)\n up_bound.append(0)\n up_bound.append(0)\n \n # Updating the Camera parameters for frame and setting the bounds for the update\n r = (R.from_matrix((H[0:3, 0:3]))).as_rotvec()\n t = H[:,3]\n cam_param = [r[0], r[1], r[2], t[0], t[1], t[2], f, 0, 0]\n cam_params.append(cam_param)\n \n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-20)\n low_bound.append(-np.inf)\n low_bound.append(-20)\n low_bound.append(f-1)\n low_bound.append(-1)\n low_bound.append(-1)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(20)\n up_bound.append(np.inf)\n up_bound.append(20)\n up_bound.append(f)\n up_bound.append(0)\n up_bound.append(0)\n\n new_cam = len(cam_params)-1\n cam_params = np.array(cam_params).reshape(-1,9)\n count = 0\n \n # listing variables to iterate \n l1 = []\n l2 = []\n count = 0\n \n for m in comp_list:\n count+=1\n l1.append(m.queryIdx)\n l2.append(m.trainIdx)\n\n l1 = np.array(l1).reshape(1,-1)\n l2 = np.array(l2).reshape(1,-1)\n l = np.vstack((l1,l2))\n l_fin = l[:,l[1, :].argsort()]\n j = 0\n count = len(points_3d)\n prev = -1\n final_l1 = []\n final_l2 = []\n final_des = []\n\n # Iterating through the list made and making sure no duplicates\n while(j<(len(l_fin[0]))):\n i1 = l_fin[0,j]\n i2 = l_fin[1,j]\n if(i2!=prev):\n # Map points insertion\n \n check = 0\n for ii in range(len(src_2d[i1])):\n m_2d = src_2d[i1][ii]\n check = 1\n ind = int(src_cam[i1][ii])\n points_2d.append([int((m_2d[0]%(2*cx))-cx), int((m_2d[1]%(2*cy))-cy),0])\n\n points_ind.append(count)\n camera_ind.append(ind)\n final_l1.append(i1)\n final_l2.append(0)\n \n # Taking Mean Desciptor if needed un 
comment 2 lines below\n # x = ((map_des[i1]*len(src_2d[i1]))+des[i2])/(len(src_2d[i1])+1)\n # map_des[i1] = x\n \n if(check==1):\n # Frame points insersion\n points_2d.append([int((dst_2d[i2,0])-cx), int((dst_2d[i2,1])-cy), 0])\n points_ind.append(count)\n camera_ind.append(new_cam)\n final_l1.append(i2)\n final_l2.append(1)\n wld_pt = src_3d[i1]\n points_3d.append([wld_pt[0], wld_pt[1], wld_pt[2]])\n prev = i2\n count = len(points_3d)\n low_bound.append(-20)\n low_bound.append(-np.inf)\n low_bound.append(-20)\n up_bound.append(20)\n up_bound.append(np.inf)\n up_bound.append(20)\n src_2d[i1].append([int((dst_2d[i2,0])), int((dst_2d[i2,1]))])\n j+=1\n \n # Final Output\n cam_params = np.array(cam_params).reshape(-1,9)\n points_3d = np.array(points_3d)\n points_2d = np.array(points_2d)\n camera_ind = np.array(camera_ind).reshape(len(camera_ind))\n points_ind = np.array(points_ind).reshape(len(points_ind))\n final_l1 = np.array(final_l1)\n final_l2 = np.array(final_l2)\n return cam_params, points_3d, points_2d, camera_ind, points_ind, final_l1, final_l2, low_bound, up_bound, map_des, src_2d", "def solve_step(self, bc_left=0):\n status = 0\n self.t += self.dt\n\n\n ### Construct the RHS vector\n # Implicit terms\n #cff1 = 0. # Fully implicit\n #cff2 = 0.\n cff1 = 0.5*(1. - 2.*self.c_im)*self.dt\n cff2 = 0.5*self.c_im*self.dt\n RHS = cff1*self.L_rhs.dot(self.B) +\\\n cff2*self.L_rhs.dot(self.B_n_m1)\n\n # Nonlinear (explicit) terms\n cff3 = self.dt*(3 + self.b_ex)*0.5\n cff4 = -self.dt*(1+2*self.b_ex)*0.5\n cff5 = self.dt*(self.b_ex)*0.5\n \n RHS += cff3*self.calc_nonlinear_rhs(self.B)\n RHS += cff4*self.calc_nonlinear_rhs(self.B_n_m1)\n RHS += cff5*self.calc_nonlinear_rhs(self.B_n_m2)\n\n # Other terms from the time-derivative\n RHS += self.B\n\n # Add the BCs to the RHS\n cff0 = 0.5*(1 + self.c_im)*self.dt\n self.add_bcs(RHS, bc_left, cff0, cff1, cff2)\n\n # Use the direct banded matrix solver (faster)\n self.B_n_p1[:] = la.solve_banded( (self._j,self._j), self.L_lhs.data[::-1,:], RHS)\n\n # Check solutions\n if np.any( np.isnan(self.B_n_p1)):\n return -1\n\n # Update the terms last\n self.B_n_m2[:] = self.B_n_m1\n self.B_n_m1[:] = self.B\n self.B[:] = self.B_n_p1\n\n ## Update the boundary terms in these equations\n self.bcs[2] = self.bcs[1]\n self.bcs[1] = self.bcs[0]\n self.bcs[0] = bc_left\n\n return status", "def __init__(self, chainFilename, nSamplesFromTOF, nBins_eD = 100, nBins_x = 20, nRuns = 4):\n self.chain, self.probs, self.nParams, self.nWalkers, self.nSteps = readChainFromFile(chainFilename)\n \n self.nRuns = nRuns\n \n self.eD_bins = nBins_eD\n self.eD_minRange = 200.0\n self.eD_maxRange = 1200.0\n self.eD_range = (self.eD_minRange, self.eD_maxRange)\n self.eD_binSize = (self.eD_maxRange - self.eD_minRange)/self.eD_bins\n self.eD_binCenters = np.linspace(self.eD_minRange + self.eD_binSize/2,\n self.eD_maxRange - self.eD_binSize/2,\n self.eD_bins)\n self.eD_binMax = self.eD_bins - 1\n \n self.x_bins = nBins_x\n self.x_minRange = 0.0\n self.x_maxRange = distances.tunlSSA_CsI.cellLength\n self.x_range = (self.x_minRange,self.x_maxRange)\n self.x_binSize = (self.x_maxRange - self.x_minRange)/self.x_bins\n self.x_binCenters = np.linspace(self.x_minRange + self.x_binSize/2,\n self.x_maxRange - self.x_binSize/2,\n self.x_bins)\n \n # parameters for making the fake data...\n self.nEvPerLoop = nSamplesFromTOF\n self.nSamplesFromTOF = nSamplesFromTOF\n self.data_x = np.repeat(self.x_binCenters, self.nEvPerLoop)\n \n \n self.ddnXSinstance = ddnXSinterpolator()\n self.beamTiming = 
beamTimingShape()\n self.zeroDegTimeSpreader = zeroDegreeTimingSpread()\n \n # stopping power model and parameters\n stoppingMedia_Z = 1\n stoppingMedia_A = 2\n stoppingMedia_rho = 8.565e-5 # from red notebook, p 157\n incidentIon_charge = 1\n stoppingMedia_meanExcitation = 19.2*1e-3\n dgas_materialDef = [stoppingMedia_Z, stoppingMedia_A, stoppingMedia_rho, stoppingMedia_meanExcitation]\n #stoppingModel = ionStopping.simpleBethe( stoppingModelParams )\n self.stoppingModel = ionStopping.simpleBethe([incidentIon_charge])\n self.stoppingModel.addMaterial(dgas_materialDef)\n \n \n self.eN_binCenters = getDDneutronEnergy( self.eD_binCenters )\n \n \n tofWindowSettings = tofWindows()\n tof_nBins = tofWindowSettings.nBins\n self.tof_minRange = [tofWindowSettings.minRange['mid'], \n tofWindowSettings.minRange['close'], \n tofWindowSettings.minRange['close'],\n tofWindowSettings.minRange['far'],\n tofWindowSettings.minRange['production'] ]\n self.tof_maxRange = [tofWindowSettings.maxRange['mid'], \n tofWindowSettings.maxRange['close'], \n tofWindowSettings.maxRange['close'],\n tofWindowSettings.maxRange['far'],\n tofWindowSettings.maxRange['production'] ]\n self.tof_range = []\n for minR,maxR in zip(self.tof_minRange, self.tof_maxRange):\n self.tof_range.append((minR,maxR))\n self.tofRunBins = [tof_nBins['mid'], tof_nBins['close'], \n tof_nBins['close'], tof_nBins['far'], tof_nBins['production']]\n \n self.standoffs = [distances.tunlSSA_CsI.standoffMid, \n distances.tunlSSA_CsI.standoffClose,\n distances.tunlSSA_CsI.standoffClose,\n distances.tunlSSA_CsI.standoffFar,\n distances.tunlSSA_CsI.standoff_TUNLruns]\n \n self.tofData = None\n self.neutronSpectra = None\n \n \n self.paramNames = ['$E_0$', '$f_1$', '$f_2$', '$f_3$', '$N_1$',\n '$N_2$', '$N_3$', '$N_4$', '$N_5$']", "def perform_phase_processing(LP_solver=\"pyglpk\"):\n radar = pyart.testing.make_single_ray_radar()\n phidp, kdp = pyart.correct.phase_proc_lp(radar, 0.0, LP_solver=LP_solver)\n return radar, phidp, kdp", "def runSimulation(self, R=5000, N=1,s=1000, method='RL'):\n global n_ec\n import numpy as np\n import matplotlib.pyplot as plt\n import matplotlib\n matplotlib.use('Agg')\n matplotlib.pyplot.switch_backend('agg')\n\n plt.rcParams.update({'font.size':20})\n plt.rc('xtick', labelsize=20)\n plt.rc('ytick', labelsize=20)\n # step = 2 fs\n # each round is 2 fs * 1000 = 2 ps\n\n init = 'ala2_1stFrame.pdb' #pdb name\n inits = init\n n_ec = 2 # angles\n count = 1\n newPoints_name = 'start_r_'+str(count)+'.pdb'\n \n #W_0 = [1/n_ec for i in range(n_ec)] # no direction\n #W_0 = [[0.25, 0.25], [0.25, 0.25]]\n W_0 = [[1/(2*n_ec), 1/(2*n_ec)] for i in range(n_ec)] # directional\n print(W_0)\n\n Ws = []\n Ws.append(W_0)\n \n trj1 = self.run(production_steps = s, start=inits, production='trj_R_0.pdb') # return mdtraj object\n comb_trj1 = trj1 # single trajectory\n trjs = comb_trj1\n trj1_theta = self.map_angles(trj1) # changed for angles to display\n print('trj1_theta', len(trj1_theta), len(trj1_theta[0]))\n trj1_Ps_theta, index = self.PreSamp(trj1_theta, myn_clusters = 10) # pre analysis (least count)\n trj1_Ps_w_theta, index_w = self.PreSamp(trj1_theta, myn_clusters = 100) # for updating the weights\n print('trj1_Ps_theta', len(trj1_Ps_theta), len(trj1_Ps_theta[0]))\n\n newPoints_index_orig = self.findStarting(trj1_Ps_theta, index, W_0, starting_n = N , method = 'RL') #need change\n newPoints = trj1[newPoints_index_orig[0]]\n newPoints.save_pdb(newPoints_name)\n \n \n print('trj1_theta[0]',trj1_theta[0])\n plt.scatter(trj1_theta[0], 
trj1_theta[1], color='dodgerblue', s=5, alpha=0.2)\n plt.xlim([-180, 180])\n plt.ylim([-180, 180])\n newPoints_theta_x = trj1_theta[0][newPoints_index_orig[0]]\n newPoints_theta_y = trj1_theta[1][newPoints_index_orig[0]]\n plt.scatter(newPoints_theta_x, newPoints_theta_y, color='red', s=50)\n plt.xlabel(r'$\\phi$')\n plt.ylabel(r'$\\psi$')\n plt.savefig('fig_'+str(count))\n plt.close()\n trjs_theta = trj1_theta\n trjs_Ps_theta = trj1_Ps_theta\n trjs_Ps_w_theta = trj1_Ps_w_theta \n for round in range(R):\n self.updateStat(trjs_theta) # based on all trajectories\n #W_1 = self.updateW(trjs_Ps_theta, W_0) \n W_1 = self.updateW(trjs_Ps_w_theta, W_0) \n W_0 = W_1\n W_1 = W_0\n Ws.append(W_0)\n s = 1000\n trj1 = self.run(production_steps = s, start=newPoints_name, production='trj_R_'+str(count)+'.pdb') # return mdtraj object\n com_trjs = trjs.join(trj1) \n trjs = com_trjs\n trjs_theta = np.array(self.map_angles(trjs)) \n trjs_Ps_theta, index = self.PreSamp(trjs_theta, myn_clusters = 100)\n myn_clusters1 = 100 #int(10*(round)+1)\n trjs_Ps_w_theta = trjs_Ps_theta\n #trjs_Ps_w_theta, index_w = self.PreSamp(trjs_theta, myn_clusters = myn_clusters1)\n newPoints_index_orig = self.findStarting(trjs_Ps_theta, index, W_1, starting_n = N , method = 'RL')\n newPoints = trjs[newPoints_index_orig[0]] \n \n count = count + 1\n newPoints_name = 'start_r_'+str(count)+'.pdb'\n newPoints.save_pdb(newPoints_name)\n\n print( myn_clusters1, W_1, self.theta_mean)\n plt.scatter(trjs_theta[0], trjs_theta[1], color='dodgerblue', s=5, alpha=0.2)\n plt.xlim([-np.pi, np.pi])\n plt.ylim([-np.pi, np.pi])\n newPoints_theta_x = trjs_theta[0][newPoints_index_orig[0]]\n newPoints_theta_y = trjs_theta[1][newPoints_index_orig[0]]\n plt.scatter(newPoints_theta_x, newPoints_theta_y, color='red', s=50)\n plt.scatter(trjs_Ps_w_theta[0], trjs_Ps_w_theta[1], color='green', s=5)\n plt.xlabel(r'$\\phi$')\n plt.ylabel(r'$\\psi$')\n plt.savefig('fig_'+str(count))\n plt.close()\n \n np.save('w_'+'r'+str(int(R))+'N'+str(N)+'s'+str(s), Ws)\n np.save('trjs_theta', trjs_theta)\n return", "def solve_l1(y, A_fun, AT_fun, lambda_l1, reshape_img_fun, show_img_progress=False, alpha=0.2, max_iter=100, solver_tol=1e-6):\n\n\n obj_lss = np.zeros(max_iter)\n x_zs = np.zeros(max_iter)\n u_norms = np.zeros(max_iter)\n times = np.zeros(max_iter)\n\n ATy = AT_fun(y)\n x_shape = ATy.shape\n d = np.prod(x_shape)\n\n def A_cgs_fun(x):\n x = np.reshape(x, x_shape, order='F')\n y = AT_fun(A_fun(x)) + alpha * x\n return vec(y)\n A_cgs = LinearOperator((d,d), matvec=A_cgs_fun, dtype='float')\n\n def compute_p_inv_A(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs, vec(b), x0=vec(z0), tol=1e-3, maxiter=100)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = np.reshape(z, x_shape, order='F')\n return z\n\n\n def A_cgs_fun_init(x):\n x = np.reshape(x, x_shape, order='F')\n y = AT_fun(A_fun(x))\n return vec(y)\n A_cgs_init = LinearOperator((d,d), matvec=A_cgs_fun_init, dtype='float')\n\n def compute_init(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs_init, vec(b), x0=vec(z0), tol=1e-2)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = np.reshape(z, x_shape, order='F')\n return z\n\n # initialize z and u\n z = compute_init(ATy, ATy)\n u = np.zeros(x_shape)\n\n\n plot_normalozer = matplotlib.colors.Normalize(vmin=0.0, vmax=1.0, clip=True)\n\n\n start_time = timeit.default_timer()\n\n for iter in 
range(max_iter):\n\n # x-update\n net_input = z+u\n Wzu, wbook = wavelet_transform(net_input)\n q = soft_threshold(Wzu, lambda_l1/alpha)\n x = inverse_wavelet_transform(q, wbook, x_shape)\n x = np.reshape(x, x_shape)\n\n # z-update\n b = ATy + alpha * (x - u)\n z = compute_p_inv_A(b, z)\n\n # u-update\n u += z - x;\n\n if show_img_progress == True:\n\n fig = plt.figure('current_sol')\n plt.gcf().clear()\n fig.canvas.set_window_title('iter %d' % iter)\n plt.subplot(1,3,1)\n plt.imshow(reshape_img_fun(np.clip(x, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('x')\n plt.subplot(1,3,2)\n plt.imshow(reshape_img_fun(np.clip(z, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('z')\n plt.subplot(1,3,3)\n plt.imshow(reshape_img_fun(np.clip(net_input, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('netin')\n plt.pause(0.00001)\n\n\n obj_ls = 0.5 * np.sum(np.square(y - A_fun(x)))\n x_z = np.sqrt(np.mean(np.square(x-z)))\n u_norm = np.sqrt(np.mean(np.square(u)))\n\n print('iter = %d: obj_ls = %.3e |x-z| = %.3e u_norm = %.3e' % (iter, obj_ls, x_z, u_norm))\n\n\n obj_lss[iter] = obj_ls\n x_zs[iter] = x_z\n u_norms[iter] = u_norm\n times[iter] = timeit.default_timer() - start_time\n\n if x_z < solver_tol:\n break\n\n infos = {'obj_lss': obj_lss, 'x_zs': x_zs, 'u_norms': u_norms,\n 'times': times, 'alpha':alpha, 'lambda_l1':lambda_l1,\n 'max_iter':max_iter, 'solver_tol':solver_tol}\n\n\n return (x, z, u, infos)", "def lwr_recursion(r):\r\n\r\n # r is (P+1, nc, nc)\r\n nc = r.shape[1]\r\n P = r.shape[0] - 1\r\n\r\n a = np.zeros((P, nc, nc)) # ar coefs\r\n b = np.zeros_like(a) # lp coefs\r\n sigb = np.zeros_like(r[0]) # forward prediction error covariance\r\n sigf = np.zeros_like(r[0]) # backward prediction error covariance\r\n delta = np.zeros_like(r[0])\r\n\r\n # initialize\r\n idnt = np.eye(nc)\r\n sigf[:] = r[0]\r\n sigb[:] = r[0]\r\n\r\n # iteratively find sequences A_{p+1}(i) and B_{p+1}(i)\r\n for p in range(P):\r\n\r\n # calculate delta_{p+1}\r\n # delta_{p+1} = r(p+1) + sum_{i=1}^{p} a(i)r(p+1-i)\r\n delta[:] = r[p + 1]\r\n for i in range(1, p + 1):\r\n delta += np.dot(a[i - 1], r[p + 1 - i])\r\n\r\n # intermediate values XXX: should turn these into solution-problems\r\n ka = np.dot(delta, linalg.inv(sigb))\r\n kb = np.dot(delta.conj().T, linalg.inv(sigf))\r\n\r\n # store a_{p} before updating sequence to a_{p+1}\r\n ao = a.copy()\r\n # a_{p+1}(i) = a_{p}(i) - ka*b_{p}(p+1-i) for i in {1,2,...,p}\r\n # b_{p+1}(i) = b_{p}(i) - kb*a_{p}(p+1-i) for i in {1,2,...,p}\r\n for i in range(1, p + 1):\r\n a[i - 1] -= np.dot(ka, b[p - i])\r\n for i in range(1, p + 1):\r\n b[i - 1] -= np.dot(kb, ao[p - i])\r\n\r\n a[p] = -ka\r\n b[p] = -kb\r\n\r\n sigf = np.dot(idnt - np.dot(ka, kb), sigf)\r\n sigb = np.dot(idnt - np.dot(kb, ka), sigb)\r\n\r\n return a, sigf", "def branch_competetive(state, time, d):\n\n th0 = state[0] \n th1 = state[1:(d[\"alpha1\"]+d[\"alpha1_p\"])]\n th2 = state[(d[\"alpha1\"]+d[\"alpha1_p\"]):]\n \n #print(len(state), len(th1))\n ### get all cytokine secreting cells \n th1_all = np.sum(th1[-d[\"alpha1_p\"]:])\n th2_all = np.sum(th2[-d[\"alpha2_p\"]:])\n ### calculate cytokine concentrations\n cyto_1 = d[\"beta_cyto_1\"]*th1_all + d[\"ifn_ext\"]\n cyto_2 = d[\"beta_cyto_2\"]*th2_all + d[\"il21_ext\"]\n ### calculate cytokine effect on rate\n fb1 = d[\"fb_rate1\"]*cyto_1**3/(cyto_1**3+d[\"K_1\"]**3)\n fb2 = d[\"fb_rate2\"]*cyto_2**3/(cyto_2**3+d[\"K_2\"]**3)\n ### update differantiation rate\n beta1 = 
d[\"beta1\"]*(1+fb1)\n beta2 = d[\"beta2\"]*(1+fb2)\n \n ### differentiate effectors th1 \n alpha = d[\"alpha1\"]\n p = 1.\n dt_th1 = diff_effector2(th1, th0, alpha, beta1, d[\"beta1_p\"], p, d)\n ### differentiate effectors th2\n alpha = d[\"alpha2\"]\n p = 1.\n dt_th2 = diff_effector2(th2, th0, alpha, beta2, d[\"beta2_p\"], p, d)\n \n ### combine all cells\n dt_th0 = -(beta1+beta2)*th0\n dt_state = np.concatenate(([dt_th0], dt_th1, dt_th2))\n\n return dt_state", "def curvecontrol(p1,p2, u_or_d):\r\n## four possibile orders:\r\n## A p1 lower and to left of p2\r\n## B p1 lower and to right of p2\r\n## C p1 higher and to left of p2\r\n## D p1 higher and to right of p2\r\n## B and C are reverse of each other\r\n## A and D are reverse of each other\r\n## so only 2 types of pairs really\r\n## each has a curve up or curve down possibility\r\n## start by converting D to A, and C to B\r\n e1 = 0.0001\r\n e2 = 0.9\r\n e1c = 1 - e1\r\n e2c = 0.5\r\n cp1 = []\r\n cp2 = []\r\n if p2[1] < p1[1]:\r\n resort = True\r\n ptemp = p2\r\n p2 = p1\r\n p1 = ptemp\r\n else:\r\n resort = False\r\n if p1[0] < p2[0]: ## type A\r\n if u_or_d: ## curve up\r\n cp1.append( ((p2[0]-p1[0]) * e1) + p1[0])\r\n cp1.append( ((p2[1]-p1[1]) * e2) + p1[1])\r\n cp2.append( ((p2[0]-p1[0]) * e2c) + p1[0])\r\n cp2.append( ((p2[1]-p1[1]) * e1c) + p1[1])\r\n else:\r\n cp1.append( ((p2[0]-p1[0]) * e2) + p1[0])\r\n cp1.append( ((p2[1]-p1[1]) * e1) + p1[1])\r\n cp2.append( ((p2[0]-p1[0]) * e1c) + p1[0])\r\n cp2.append( ((p2[1]-p1[1]) * e2c) + p1[1])\r\n else: ## type B\r\n if u_or_d: ## curve up\r\n cp1.append( p1[0]-((p1[0]-p2[0]) * e1))\r\n cp1.append( ((p2[1]-p1[1]) * e2) + p1[1])\r\n cp2.append( p1[0] - ((p1[0]-p2[0]) * e2c))\r\n cp2.append( ((p2[1]-p1[1]) * e1c) + p1[1])\r\n else:\r\n cp1.append( p1[0]-((p1[0]-p2[0]) * e2))\r\n cp1.append( ((p2[1]-p1[1]) * e1) + p1[1])\r\n cp2.append( p1[0]-((p1[0]-p2[0]) * e1c))\r\n cp2.append( ((p2[1]-p1[1]) * e2c) + p1[1])\r\n if resort:\r\n ptemp = cp2\r\n cp2 = cp1\r\n cp1 = ptemp\r\n return cp1,cp2", "def final_homography(pts1, pts2, feats1, feats2):\n\n #\n # Your code here\n #\n\n idxs1, idxs2 = find_matches(feats1, feats2)\n ransac_return = ransac(pts1[idxs1], pts2[idxs2])\n\n return ransac_return, idxs1, idxs2", "def solver(output_folder, prior_filename, data_filename, Lpost, dpost, resume=True, test_plot=False):\n\n def log_prior(cube, ndim, nparams):\n cube[0] = cube[0]*(F_lim[1] - F_lim[0]) + F_lim[0]\n cube[1] = cube[1]*(A_lim[1] - A_lim[0]) + A_lim[0]\n cube[2] = cube[2]*(Arel_lim[1] - Arel_lim[0]) + Arel_lim[0]\n cube[3] = cube[3]*(Ti_lim[1] - Ti_lim[0]) + Ti_lim[0]\n\n for idx, (w, amp_lim) in enumerate(zip(w_extra, Arel_extra), 4):\n cube[idx] = cube[idx]*(amp_lim[1] - amp_lim[0]) + amp_lim[0]\n\n def log_likelihood(cube, ndim, nparams):\n # I want to fix this at some point\n # i = random.randint(0, nL-1)\n i = np.random.choice(nL)\n L = Lpost[i]\n d = dpost[i]\n # L = 0.380173301412519577E+05\n # d = 0.883628502371783142E+00\n # amps, w, mass, V, Ti = build_function_parameters(cube, nparams)\n\n amps = [cube[1]*cube[2], cube[1]]\n w = list(w0)\n mass = list(mu)\n Ti = [0.025, cube[3]]\n V = [0.0, 0.0]\n\n vals = forward_model(r, L, d, cube[0], w, mass, amps, Ti,\n V, nlambda=2000)\n #vals = offset_forward_model(r, L, d, cube[0], w, mass, amps, Ti,\n # V, sm_ang=False, nlambda=2000, coeff=0.4)\n # trying to q offset here\n #vals += cube[1] * 0.15 / (1.0 + cube[0])\n\n chisq = np.sum((vals - sig)**2 / error**2)\n return -chisq / 2.0\n\n def build_function_parameters(cube, 
nparams):\n \"\"\"\n Helper function for building some intermediate lists of parameters\n needed for the forward q.\n\n Note that you need to be careful with the cube parameter. It is not a\n python list! I believe it is some kind of fortran array. For example,\n you cannot call len() on it.\n \"\"\"\n amps = [0.0 for _ in range(nparams-4+2)]\n amps[0] = cube[2]\n amps[1] = 1.0\n for idx, x in enumerate(amps[2:], 2):\n amps[idx] = cube[idx+2]\n #amps.extend([x for x in list(cube[4:])])\n amps = [x * cube[1] for x in amps]\n\n w = [x for x in w0]\n w += w_extra\n\n mass = [x for x in mu]\n mass += [mu[0] for _ in w_extra]\n\n V = [0.0 for _ in mass]\n\n #Ti = [0.025*1000.0/300.0, cube[3]]\n #Ti += [0.025*1000.0/300.0 for _ in w_extra]\n\n Ti = [0.025 for _ in w]\n Ti[1] = cube[3]\n\n return amps, w, mass, V, Ti\n\n with open(prior_filename, 'r') as infile:\n prior = json.load(infile, parse_float=np.float64)\n\n data = io.h5_2_dict(data_filename)\n\n nL = len(Lpost)\n ix = data['fit_ix']['0'][0:-1:3]\n r = data['r'][ix]\n sig = data['sig'][ix]\n error = data['sig_sd'][ix]\n\n F_lim = prior['F_lim']\n A_lim = (0.6*np.max(sig), 1.4*np.max(sig))\n Arel_lim = prior['Arel_lim']\n Ti_lim = prior['Ti_lim']\n w_extra = prior['w_extra']\n Arel_extra = prior['Arel_extra']\n\n assert len(w_extra) == len(Arel_extra)\n\n n_params = 4 + len(w_extra)\n folder = abspath(output_folder)\n\n print('There are {0:d} paremeters for MultiNest'.format(n_params))\n\n if test_plot:\n npts = 30\n test_sig = np.zeros((npts, len(r)))\n for i in range(npts):\n j = random.randint(0, nL-1)\n L = Lpost[j]\n d = dpost[j]\n cube = [random.random() for _ in range(n_params)]\n log_prior(cube, None, None)\n amps, w, mass, V, Ti = build_function_parameters(cube, n_params)\n test_sig[i, :] = forward_model(r, L, d, cube[0], w, mass, amps, Ti,\n V, sm_ang=False, nlambda=2000)\n\n # fig, ax = plt.subplots()\n # for i in xrange(npts):\n # ax.plot(r, test_sig[i, :], 'C0')\n # ax.errorbar(r, sig, yerr=error, fmt='', ecolor='C2', color='C1')\n # plt.show()\n else:\n pymultinest.run(log_likelihood, log_prior, n_params, importance_nested_sampling=False,\n resume=resume, verbose=True, sampling_efficiency='model', n_live_points=100,\n outputfiles_basename=join(folder, 'finesse_'))", "def MatchMatrixs (self,Node1,Node2):\n\n T1Native_Node = Node1\n T1Native_Matrix = slicer.util.arrayFromVolume(T1Native_Node)\n DimN = T1Native_Matrix.shape\n T1Enhanced_Node = Node2\n T1Enhanced_Matrix = slicer.util.arrayFromVolume(T1Enhanced_Node)\n DimE = T1Enhanced_Matrix.shape\n\n NMatrix = self.GetIJKToRASnpArray(T1Native_Node)\n NVector = NMatrix[:-1,-1]\n EMatrix = self.GetIJKToRASnpArray(T1Enhanced_Node)\n EVector = EMatrix[:-1,-1]\n NPixelSize = [np.linalg.norm(NMatrix[:-1,0]), np.linalg.norm(NMatrix[:-1,1])]\n EPixelSize = [np.linalg.norm(EMatrix[:-1,0]), np.linalg.norm(EMatrix[:-1,1])]\n\n Niversor = NMatrix[:-1,0]/NPixelSize[0]\n Njversor = NMatrix[:-1,1]/NPixelSize[1]\n Nkversor = np.round(np.cross(Niversor,Njversor),3)\n Nkstep = round(np.linalg.norm(NMatrix[:-1,2]),3)\n\n Eiversor = EMatrix[:-1,0]/EPixelSize[0]\n Ejversor = EMatrix[:-1,1]/EPixelSize[1]\n Ekversor = np.round(np.cross(Eiversor,Ejversor),3)\n Ekstep = round(np.linalg.norm(EMatrix[:-1,2]),3)\n print(Nkversor,Ekversor,Nkstep,Ekstep,NVector,EVector,(NVector-EVector).dot(Ekversor))\n if not ( np.sum(Nkversor==Ekversor) == 3 and Nkstep==Ekstep and ((NVector-EVector).dot(Ekversor)) == 0 ): # it verifies if the slices are oriented in the same direction, with the same step between slices 
and if the first images are complanar.\n slicer.util.warningDisplay('The geometry of the LL Native and LL Enhanced volume doesn\\'t match. It could deteriorate the ECV map', windowTitle= 'Warning')\n\n if (DimE == DimN):\n T1_ECVMappingLogic.setupNodeFromNode(self,self.ECVMapNode , self.NativeT1_Selector.currentNode()) \n return [T1Native_Matrix,T1Enhanced_Matrix]\n if (DimE[1:3] == DimN[1:3]):\n k = min([DimE[1],DimN[1]])\n T1_ECVMappingLogic.setupNodeFromNode(self,self.ECVMapNode , self.NativeT1_Selector.currentNode())\n return [T1Native_Matrix[:k,:,:],T1Enhanced_Matrix[:k,:,:]]\n\n jN = np.arange(0,DimN[2]*NPixelSize[1],NPixelSize[1])+NPixelSize[1]/2+(NVector-EVector).dot(Njversor)\n iN = np.arange(0,DimN[1]*NPixelSize[0],NPixelSize[0])+NPixelSize[0]/2+(NVector-EVector).dot(Niversor)\n iE = np.arange(0,DimE[1]*EPixelSize[0],EPixelSize[0])+EPixelSize[0]/2\n jE = np.arange(0,DimE[2]*EPixelSize[1],EPixelSize[1])+EPixelSize[1]/2 \n if DimE[1] > DimN[1]: ## I concidered a square image\n T1Nreshaped = np.zeros(DimE)\n for k in range(DimN[0]):\n f = interpolate.interp2d(iN, jN, np.nan_to_num(T1Native_Matrix[k,:,:]), fill_value = 0)\n T1Nreshaped[k,:,:] = f(iE, jE)\n T1Ereshaped = T1Enhanced_Matrix[:k+1,:,:]\n T1_ECVMappingLogic.setupNodeFromNode(self,self.ECVMapNode , self.EnhancedT1_Selector.currentNode())\n return [T1Nreshaped,T1Ereshaped]\n else:\n T1Ereshaped = np.zeros(DimN)\n for k in range(DimE[0]):\n f = interpolate.interp2d(iE, jE, np.nan_to_num(T1Enhanced_Matrix[k,:,:]), fill_value = 0)\n T1Ereshaped[k,:,:] = f(iN, jN) \n T1Nreshaped = T1Native_Matrix[:k+1,:,:]\n T1_ECVMappingLogic.setupNodeFromNode(self,self.ECVMapNode , self.NativeT1_Selector.currentNode()) \n return [T1Nreshaped,T1Ereshaped]", "def _comp_het_pair_pattern(self,\n gt_types1, gt_nums1,\n gt_types2, gt_nums2,\n gt_phases1, gt_phases2):\n\n # already phased before sending here.\n ret = {'candidates': [], 'priority': 4}\n for kid in self.samples_with_parent:\n if gt_nums1[kid._i] == gt_nums2[kid._i]: continue\n if not (gt_types1[kid._i] == HET and gt_types2[kid._i] == HET): continue\n #if not (gt_phases1[kid._i] and gt_phases2[kid._i]): continue\n if gt_types1[kid.mom._i] == HOM_ALT or gt_types2[kid.dad._i] == HOM_ALT: continue\n mom, dad = kid.mom, kid.dad\n\n kid_phased = gt_phases1[kid._i] and gt_phases2[kid._i]\n dad_phased = gt_phases1[dad._i] and gt_phases2[dad._i]\n mom_phased = gt_phases1[mom._i] and gt_phases2[mom._i]\n\n if kid_phased and dad_phased and (gt_nums1[dad._i] == gt_nums1[kid._i]) and (gt_nums2[dad._i] == gt_nums2[kid._i]):\n continue\n if kid_phased and mom_phased and (gt_nums1[mom._i] == gt_nums1[kid._i]) and (gt_nums2[mom._i] == gt_nums2[kid._i]):\n continue\n\n if kid_phased and dad_phased and mom_phased and gt_types1[dad._i] != gt_types2[dad._i] and gt_types1[mom._i] != gt_types2[mom._i]:\n priority = 1\n\n elif kid_phased and gt_types1[dad._i] != gt_types1[mom._i] and gt_types2[dad._i] != gt_types2[mom._i]:\n # parents are unphased hets at different sites.\n priority = 1\n else:\n priority = 2\n for parent in (kid.mom, kid.dad):\n # unphased het\n if gt_types2[parent._i] == gt_types1[parent._i] == HET:\n priority += 1\n\n ret['candidates'].append(kid)\n ret['priority'] = min(ret['priority'], priority)\n ret['candidate'] = len(ret['candidates']) > 0\n return ret", "def test_SetMultipleMovingLoadsMultipleConditionsDifferentOriginReversedConfigurationPositive(self):\n\n # create nodes\n second_coord = [1.0, 0.0, 0.0]\n third_coord = [2.0, 0.0, 0.0]\n self.mp.CreateNewNode(1, 0.0, 0.0, 0.0)\n 
self.mp.CreateNewNode(2, second_coord[0],second_coord[1],second_coord[2])\n self.mp.CreateNewNode(3, third_coord[0], third_coord[1], third_coord[2])\n\n # create condition\n self.mp.CreateNewCondition(\"MovingLoadCondition2D2N\", 1, [3, 2], self.mp.GetProperties()[1])\n self.mp.CreateNewCondition(\"MovingLoadCondition2D2N\", 2, [2, 1], self.mp.GetProperties()[1])\n\n parameters = self.base_parameters\n parameters.AddVector(\"origin\", [1.25, 0, 0])\n parameters.AddVector(\"configuration\", [0.25])\n\n process = GMA.SetMultipleMovingLoadsProcess(self.mp,parameters)\n\n # get conditions\n conditions = []\n conditions.append(self.cmp.GetCondition(3))\n conditions.append(self.cmp.GetCondition(4))\n\n # initialize and set load\n process.ExecuteInitialize()\n process.ExecuteInitializeSolutionStep()\n\n # initialise matrices\n lhs = KratosMultiphysics.Matrix(0,0)\n rhs = KratosMultiphysics.Vector(0)\n\n # set load on node\n all_rhs = []\n for cond in conditions:\n cond.CalculateLocalSystem(lhs, rhs, self.mp.ProcessInfo)\n all_rhs.append(list(rhs))\n\n self.checkRHS(all_rhs[0], [0.0, -1.0, 0.0, -1.0])\n self.checkRHS(all_rhs[1], [0.0, 0.0, 0.0, 0.0])", "def light_source_directions():\n L = np.array([[-0.06059872, -0.44839055, 0.8917812],\n [-0.05939919, -0.33739538, 0.93948714],\n [-0.05710194, -0.21230722, 0.97553319],\n [-0.05360061, -0.07800089, 0.99551134],\n [-0.04919816, 0.05869781, 0.99706274],\n [-0.04399823, 0.19019233, 0.98076044],\n [-0.03839991, 0.31049925, 0.9497977],\n [-0.03280081, 0.41611025, 0.90872238],\n [-0.18449839, -0.43989616, 0.87889232],\n [-0.18870114, -0.32950199, 0.92510557],\n [-0.1901994, -0.20549935, 0.95999698],\n [-0.18849605, -0.07269848, 0.97937948],\n [-0.18329657, 0.06229884, 0.98108166],\n [-0.17500445, 0.19220488, 0.96562453],\n [-0.16449474, 0.31129005, 0.93597008],\n [-0.15270716, 0.4160195, 0.89644202],\n [-0.30139786, -0.42509698, 0.85349393],\n [-0.31020115, -0.31660118, 0.89640333],\n [-0.31489186, -0.19549495, 0.92877599],\n [-0.31450962, -0.06640203, 0.94692897],\n [-0.30880699, 0.06470146, 0.94892147],\n [-0.2981084, 0.19100538, 0.93522635],\n [-0.28359251, 0.30729189, 0.90837601],\n [-0.26670649, 0.41020998, 0.87212122],\n [-0.40709586, -0.40559588, 0.81839168],\n [-0.41919869, -0.29999906, 0.85689732],\n [-0.42618633, -0.18329412, 0.88587159],\n [-0.42691512, -0.05950211, 0.90233197],\n [-0.42090385, 0.0659006, 0.90470827],\n [-0.40860354, 0.18720162, 0.89330773],\n [-0.39141794, 0.29941372, 0.87013988],\n [-0.3707838, 0.39958255, 0.83836338],\n [-0.499596, -0.38319693, 0.77689378],\n [-0.51360334, -0.28130183, 0.81060526],\n [-0.52190667, -0.16990217, 0.83591069],\n [-0.52326874, -0.05249686, 0.85054918],\n [-0.51720021, 0.06620003, 0.85330035],\n [-0.50428312, 0.18139393, 0.84427174],\n [-0.48561334, 0.28870793, 0.82512267],\n [-0.46289771, 0.38549809, 0.79819605],\n [-0.57853599, -0.35932235, 0.73224555],\n [-0.59329349, -0.26189713, 0.76119165],\n [-0.60202327, -0.15630604, 0.78303027],\n [-0.6037003, -0.04570002, 0.7959004],\n [-0.59781529, 0.06590169, 0.79892043],\n [-0.58486953, 0.17439091, 0.79215873],\n [-0.56588359, 0.27639198, 0.77677747],\n [-0.54241965, 0.36921337, 0.75462733],\n [0.05220076, -0.43870637, 0.89711304],\n [0.05199786, -0.33138635, 0.9420612],\n [0.05109826, -0.20999284, 0.97636672],\n [0.04919919, -0.07869871, 0.99568366],\n [0.04640163, 0.05630197, 0.99733494],\n [0.04279892, 0.18779527, 0.98127529],\n [0.03870043, 0.30950341, 0.95011048],\n [0.03440055, 0.41730662, 0.90811441],\n [0.17290651, -0.43181626, 
0.88523333],\n [0.17839998, -0.32509996, 0.92869988],\n [0.18160174, -0.20480196, 0.96180921],\n [0.18200745, -0.07490306, 0.98044012],\n [0.17919505, 0.05849838, 0.98207285],\n [0.17329685, 0.18839658, 0.96668244],\n [0.1649036, 0.30880674, 0.93672045],\n [0.1549931, 0.41578148, 0.89616009],\n [0.28720483, -0.41910705, 0.8613145],\n [0.29740177, -0.31410186, 0.90160535],\n [0.30420604, -0.1965039, 0.9321185],\n [0.30640529, -0.07010121, 0.94931639],\n [0.30361153, 0.05950226, 0.95093613],\n [0.29588748, 0.18589214, 0.93696036],\n [0.28409783, 0.30349768, 0.90949304],\n [0.26939905, 0.40849857, 0.87209694],\n [0.39120402, -0.40190413, 0.8279085],\n [0.40481085, -0.29960803, 0.86392315],\n [0.41411685, -0.18590756, 0.89103626],\n [0.41769724, -0.06449957, 0.906294],\n [0.41498764, 0.05959822, 0.90787296],\n [0.40607977, 0.18089099, 0.89575537],\n [0.39179226, 0.29439419, 0.87168279],\n [0.37379609, 0.39649585, 0.83849122],\n [0.48278794, -0.38169046, 0.78818031],\n [0.49848546, -0.28279175, 0.8194761],\n [0.50918069, -0.1740934, 0.84286803],\n [0.51360856, -0.05870098, 0.85601427],\n [0.51097962, 0.05899765, 0.8575658],\n [0.50151639, 0.17420569, 0.84742769],\n [0.48600297, 0.28260173, 0.82700506],\n [0.46600106, 0.38110087, 0.79850181],\n [0.56150442, -0.35990283, 0.74510586],\n [0.57807114, -0.26498677, 0.77176147],\n [0.58933134, -0.1617086, 0.7915421],\n [0.59407609, -0.05289787, 0.80266769],\n [0.59157958, 0.057798, 0.80417224],\n [0.58198189, 0.16649482, 0.79597523],\n [0.56620006, 0.26940003, 0.77900008],\n [0.54551481, 0.36380988, 0.7550205]], dtype=float)\n return L", "def LucasKanade(im1, im2, window=9, n_levels=1, alpha=.001, iterations=1):\n\n # radius of window\n hw = int(np.floor(window / 2))\n\n # gradient kernel\n ndim = len(im1.shape)\n grad_kernel = _compute_spatial_gradient_kernel(ndim)\n\n # temporal filter\n temporal_kernel = np.ones(grad_kernel[0].shape) / 2.\n\n for p in xrange(n_levels):\n # init\n if p == 0:\n # flow velocity field\n u = np.zeros(im1.shape)\n v = np.zeros(im1.shape)\n else:\n # zoom flow velocity field\n raise RuntimeError(\"Parallel execution not implemented!\")\n\n # refinement loop\n u = np.round(u)\n v = np.round(v)\n for r in xrange(iterations):\n print \"Iteration %i/%i...\" % (r + 1, iterations)\n\n # loop on every pixel\n for i in xrange(hw, im1.shape[0] - hw):\n for j in xrange(hw, im1.shape[1] - hw):\n print \"\\tWorking on patch (%s-%s, %s-%s)...\" % (i - hw,\n i + hw,\n j - hw,\n j + hw)\n\n patch1 = im1[i - hw:i + hw + 1, j - hw:j + hw + 1]\n\n # move patch: resample grid for im2\n lr = i - hw + v[i, j] + 1\n hr = i + hw + v[i, j] + 1\n lc = j - hw + u[i, j] + 1\n hc = j + hw + u[i, j] + 1\n\n if lr < 1 or hr > im1.shape[0] or lc < 1 or hc >\\\n im1.shape[1]:\n raise Warning(\"Regularized LS not implemented!\")\n else:\n # resample patch on im2 according current motion\n # estimates\n patch2 = im2[lr - 1:hr, lc - 1:hc]\n\n # compute spatial gradient of patch1\n Dx_im1, Dy_im1 = _compute_spatial_gradient(patch1,\n grad_kernel)\n\n # compute spatial gradient of patch2\n Dx_im2, Dy_im2 = _compute_spatial_gradient(patch2,\n grad_kernel)\n\n # compute spatial gradient of film [patch1, patch2]\n # along x axis\n Dx = (Dx_im1 + Dx_im2)[1:window - 1,\n 1:window - 1].T / 2.\n\n # compute spatial gradient of film [patch1, patch2]\n # along y axis\n Dy = (Dy_im1 + Dy_im2)[1:window - 1,\n 1:window - 1].T / 2.\n\n # compute temporal gradient of film [patch1, patch2]\n Dt_1 = scipy.signal.convolve(patch1,\n temporal_kernel)\n Dt_2 = 
scipy.signal.convolve(patch2,\n temporal_kernel)\n Dt = (Dt_1 - Dt_2)[1:window - 1, 1:window - 1].T / 2.\n\n # make coefficient matrix A\n A = np.vstack((Dx.ravel(), Dy.ravel())).T\n\n # compute G = A'A\n G = np.dot(A.T, A)\n\n # regularize G (to ensure solubility of LS problem)\n G[0, 0] += alpha\n G[1, 1] += alpha\n\n # solve WLS problem for velocity V = (Vx_ij, Vy_ij)\n # patch1 -> patch2\n V = scipy.linalg.lstsq(G, -np.dot(A.T, Dt.ravel()))[0]\n\n # update velocity field around point p = (i, j)\n u[i, j] += V[0]\n v[i, j] += V[1]\n\n # resizing\n u = u[window - 1:u.shape[0] - window + 1,\n window - 1:u.shape[1] - window + 1]\n v = v[window - 1:v.shape[0] - window + 1,\n window - 1:v.shape[1] - window + 1]\n\n # return flow velocity field\n return u, v", "def computeRazor(l0, l1, met):\n metlv = met\n l0 = l0\n l1 = l1\n # lab frame\n vBETA_z = (l0+l1).Vect()*r.Double(1./(l0.E()+l1.E()))\n vBETA_z.SetX(0.0)\n vBETA_z.SetY(0.0)\n l0.Boost(-vBETA_z)\n l1.Boost(-vBETA_z)\n pT_CM = (l0+l1).Vect() + metlv.Vect()\n pT_CM.SetZ(0.0)\n ll = l0+l1\n SHATR = sqrt( 2.*(ll.E()*ll.E() - ll.Vect().Dot(pT_CM)\n + ll.E()*sqrt( ll.E()*ll.E() + pT_CM.Mag2() - 2.*ll.Vect().Dot(pT_CM) )))\n vBETA_T_CMtoR = pT_CM * r.Double(1./sqrt(pT_CM.Mag2() + SHATR*SHATR))\n l0.Boost(-vBETA_T_CMtoR)\n l1.Boost(-vBETA_T_CMtoR)\n ll.Boost(-vBETA_T_CMtoR)\n # R-frame\n dphi_LL_vBETA_T = fabs((ll.Vect()).DeltaPhi(vBETA_T_CMtoR))\n dphi_L1_L2 = fabs(l0.Vect().DeltaPhi(l1.Vect()))\n vBETA_R = (l0.Vect() - l1.Vect())*r.Double(1./(l0.E()+l1.E()))\n try:\n gamma_R = 1./sqrt(1.-vBETA_R.Mag2())\n except ValueError:\n print 1.-vBETA_R.Mag2()\n dphi_vBETA_R_vBETA_T = fabs(vBETA_R.DeltaPhi(vBETA_T_CMtoR))\n l0.Boost(-vBETA_R)\n l1.Boost(vBETA_R)\n # R+1 frame\n MDELTAR = 2.*l0.E()\n costhetaRp1 = l0.Vect().Dot(vBETA_R)/(l0.Vect().Mag()*vBETA_R.Mag())\n return dphi_LL_vBETA_T, MDELTAR", "def construct_inv_boundaries(params,par_dict,eq_dict,K_RC,K_CP,m_P):\n #intrapop params\n q1=par_dict['q1']\n q2=par_dict['q2']\n K =par_dict['K']\n m_C= K_CP*m_P\n q10 = params['q10']\n q20 = params['q20']\n hC0 = params['hC0']\n hP0 = params['hP0']\n\n #interpop params\n a1=par_dict['a1']\n a2=par_dict['a2']\n a3=par_dict['a3']\n e1=params['e1']\n e2=params['e2']\n e3=params['e3']\n \n\n t_hc = par_dict['t_hc']\n t_hp = par_dict['t_hp']\n\n #eq values\n\n #L-V\n R_eq_s2 = eq_dict['R_eq_s2']\n C_eq_s2 = eq_dict['C_eq_s2']\n P_eq_s3 = eq_dict['P_eq_s3']\n R_eq_s3 = eq_dict['R_eq_s3']\n #R-M\n R_eq_s2RM = eq_dict['R_eq_s2RM']\n C_eq_s2RM = eq_dict['C_eq_s2RM']\n R_eq_s3RM = eq_dict['R_eq_s3RM']\n P_eq_s3RM = eq_dict['P_eq_s3RM']\n \n ##Invasibility boundaries\n\n #L-V\n I_C_s2 = set_I_C_s2(e1,a1,K,q1)\n I_P_s3 = set_I_P_s3(e2,a2,K,q2)\n I_P_s4 = set_I_P_s4(e2,e3,a2,a3,q2,R_eq_s2,C_eq_s2)\n I_C_s5 = set_I_C_s5(e1,a1,a3,R_eq_s3,P_eq_s3,q1)\n \n #R-M\n I_C_s2RM = set_I_C_s2RM(e1,a1,K,q1,hC0,q10)\n I_P_s3RM = set_I_P_s3RM(e2,a2,K,q2,hP0,q20)\n I_P_s4RM = set_I_P_s4RM(e2,e3,a2,a3,q2,R_eq_s2RM,C_eq_s2RM,hP0,q20)\n I_C_s5RM = set_I_C_s5RM(e1,e2,a1,a3,m_C,R_eq_s3RM,P_eq_s3RM,q1,t_hc,q10,q20,hP0,hC0) \n\n inv_dict= {'I_C_s2':I_C_s2,'I_P_s3':I_P_s3,'I_P_s4':I_P_s4,'I_C_s5':I_C_s5,\n 'I_C_s2RM':I_C_s2RM,'I_P_s3RM':I_P_s3RM,'I_P_s4RM':I_P_s4RM,'I_C_s5RM':I_C_s5RM}\n\n return inv_dict", "def origami_H2_1cyl(l1,l2,l3,h,t):\n l = l1 + l2 + l3\n z = (h-1)*l+1\n x = [None] + range(2,h*l+2)\n for i in xrange(0,h*l,l):\n x[i+l] = i+1\n\n y = [None] + range(l+1,l*h+1) + [None]*l\n for i in xrange(l3):\n y[z + (t+i)%l] = l1+l2+i+1\n for i in xrange(l2):\n y[z + (l3+t+i)%l] 
= l1+i+1\n for i in xrange(l1):\n y[z + (l3+l2+t+i)%l] = i+1\n\n return Origami(x[1:],y[1:])", "def compute_transition(seg_actif, seg_next):\n\n\t#Recuperation des points A (debut) et B (fin) du premier segment\n\ta = seg_actif.start\n\tb = seg_actif.end\n\n\t#calcul du track change entre les deux segments\n\ttrack_change = np.arccos((seg_actif.scal(seg_next)) / (seg_actif.norm() * seg_next.norm())) * RAD2DEG # en degrés\n\t#print(\"track_change=\", track_change) #en degré\n\n\t#calcul bank_angle, turn_radius et lead_distance\n\n\tif ALTITUDE>195:\n\t\tmax_angle = (16 - 25) / (300 - 195) * (ALTITUDE - 195) + 25\n\t\tbank_angle = max(5, min(0.5 * track_change, max_angle)) #en DEG\n\t\tturn_radius = GS ** 2 / (G * np.tan(bank_angle / RAD2DEG)) / NM2M # NM\n\t\tlead_distance = turn_radius * np.tan(0.5 * track_change / RAD2DEG) # NM\n\t\tif lead_distance > 20: # NM\n\t\t\tlead_distance = 20 # NM\n\t\t\tturn_radius = lead_distance / np.tan(0.5 * track_change / RAD2DEG)\n\t\t\tbank_angle = max(5, min(np.arctan(GS ** 2) / (G * turn_radius), max_angle))\n\t\t#print(\"lead_distance\", lead_distance)\n\telse :\n\t\tmax_angle = 25 #DEG\n\t\tbank_angle = max(5, min(0.5*track_change,max_angle)) #DEG\n\t\tturn_radius = GS**2 / (G*np.tan(bank_angle / RAD2DEG)) / NM2M # NM\n\t\tlead_distance = turn_radius * np.tan(0.5 * track_change / RAD2DEG)\n\n\t#calcul de b_in et b_out : points de debut et fin de la transition en arc de cercle\n\tif track_change < EPSILON:\n\t\tb_in = b\n\t\tb_out = b\n\t\tb_center = b\n\n\telse:\n\t\tnorme_act = seg_actif.norm()\n\t\tactive_track = get_track(seg_actif) # en RAD\n\t\tnext_track = get_track(seg_next)\n\t\tb_in = calcul_point_de_transition(a, norme_act, lead_distance, active_track)\n\t\tb_out = calcul_point_de_transition(b, lead_distance, 0, next_track)\n\n\t\t#calcul de l'angle a_b_bcenter et du point b_center (centre de l'arc de transition)\n\n\t\td = (turn_radius ** 2 + lead_distance ** 2) ** 0.5\n\t\tif seg_actif.det(seg_next) > 0:\n\t\t\ta_b_bc_angle = ((180 + track_change) / 2) / RAD2DEG # en rad\n\t\t\tb_center = g.Point(b.x + d * np.sin((active_track - a_b_bc_angle)),\n\t\t\t\t\t\t b.y + d * np.cos((active_track - a_b_bc_angle)))\n\t\t\tbank_angle = - bank_angle\n\n\t\telse:\n\t\t\ta_b_bc_angle = ((180 - track_change) / 2) / RAD2DEG\n\t\t\tb_center = g.Point(b.x - d * np.sin((active_track - a_b_bc_angle)),\n\t\t\t\t\t\t b.y - d * np.cos((active_track - a_b_bc_angle)))\n\n\treturn(track_change, turn_radius, b_in, b_out, b_center, lead_distance, bank_angle)", "def proz(): \r\n print(\"processing: \",CURDATA()[0]) \r\n Check_180turn(left_boundary,right_boundary)\r\n EF() #exponential window multiplication + fourier\r\n APK0() #1. Phase correction 0th Ordnung\r\n APK1() #1. 
Phase correction 1st Ordnung\r\n ABS() #Baseline correction\r\n APK()\r\n ABS() #Baseline correction\r\n Check_180turn(left_boundary,right_boundary)", "def test_two_phase_region_usage():\n compsets_298 = CompsetPair([\n BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_300 = CompsetPair([\n BinaryCompset('P1', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n tpr = TwoPhaseRegion(compsets_298) # Initial compsets for P1 and P2 at 298 K\n assert tpr.compsets_belong_in_region(compsets_300)\n tpr.add_compsets(compsets_300)\n assert len(tpr.compsets) == 2", "def problem2():\n \n pts_array, feats_array = p2.load_pts_features('data/pts_feats.npz')\n\n # points and features for image1 and image2\n pts1, pts2 = pts_array\n fts1, fts2 = feats_array\n\n # Loading images\n img1 = Image.open('data/img1.png')\n img2 = Image.open('data/img2.png')\n\n im1 = np.array(img1)\n im2 = np.array(img2)\n\n plt.figure(1)\n plt.subplot(1, 2, 1)\n plt.imshow(im1)\n plt.plot(pts1[:, 0], pts1[:, 1], 'ro', markersize=1.3)\n plt.subplot(1, 2, 2)\n plt.imshow(im2)\n plt.plot(pts2[:, 0], pts2[:, 1], 'ro', markersize=1.3)\n\n # display algined image\n H, ix1, ix2 = p2.final_homography(pts1, pts2, feats_array[0],\n feats_array[1])\n\n pts1 = pts1[ix1]\n pts2 = pts2[ix2]\n\n plt.figure(2)\n plt.subplot(1, 3, 1).set_title('Image 1')\n plt.imshow(im1)\n plt.plot(pts1[:, 0],\n pts1[:, 1],\n 'ro',\n markersize=2.3,\n markerfacecolor='none')\n plt.subplot(1, 3, 2).set_title('Image 2')\n plt.imshow(im2)\n plt.plot(pts2[:, 0],\n pts2[:, 1],\n 'ro',\n markersize=2.3,\n markerfacecolor='none')\n plt.subplot(1, 3, 3).set_title('Algined image 1')\n\n H_inv = np.linalg.inv(H)\n H_inv /= H_inv[2, 2]\n im3 = img1.transform(size=(im1.shape[1], im1.shape[0]),\n method=Image.PERSPECTIVE,\n data=H_inv.ravel(),\n resample=Image.BICUBIC)\n\n plt.show()", "def ellipsoidPair(N,srcdist=89.61e3+1.5e3,primalign=np.zeros(6),\\\n secalign=np.zeros(6),rrays=False,f=None,\\\n plist=[[0],[0],[0]],hlist=[[0],[0],[0]]):\n #Establish subannulus of rays\n r1 = conic.ellipsoidRad(srcdist,1.,220.,8400.,8500.)\n rays = sources.subannulus(220.,r1,100./220.,N,zhat=-1.)\n tran.pointTo(rays,0,0,srcdist,reverse=1.)\n## #Transform to node position\n## tran.transform(rays,220,0,0,0,0,0)\n## #Set up finite source distance\n## raydist = sqrt(srcdist**2+rays[1]**2+rays[2]**2)\n## rays[4] = rays[1]/raydist\n## rays[5] = rays[2]/raydist\n## rays[6] = -sqrt(1.-rays[4]**2-rays[5]**2)\n\n #Place mirror pair\n coords = [tran.tr.identity_matrix()]*4\n prad = conic.ellipsoidRad(srcdist,1.,220.,8400.,8450.)\n tran.transform(rays,prad,0,50.,0,0,0,\\\n coords=coords)\n tran.transform(rays,*primalign,coords=coords)\n tran.transform(rays,-prad,0,-8450.,0,0,0,\\\n coords=coords)\n surf.ellipsoidPrimaryLL(rays,220.,8400.,srcdist,1.,8500.,8400.,100./220,\\\n *plist)\n #Vignette any rays outside of active area\n rays = tran.vignette(rays,ind=np.logical_and(rays[3]<8500.,\\\n rays[3]>8400.))\n## surf.ellipsoidPrimary(rays,220.,8400.,srcdist,1.)\n tran.reflect(rays)\n #Place secondary in primary's reference frame\n srad = conic.ehSecRad(srcdist,1.,220.,8400.,8350.)\n tran.transform(rays,srad,0,8350.,0,0,0,\\\n coords=coords)\n tran.transform(rays,*secalign,coords=coords)\n tran.itransform(rays,srad,0,8350.,0,0,0,\\\n coords=coords)\n## surf.ellipsoidSecondary(rays,220.,8400.,srcdist,1.)\n surf.ellipsoidSecondaryLL(rays,220.,8400.,srcdist,1.,8400.,8300.,100./220,\\\n *hlist)\n rays = 
tran.vignette(rays,ind=np.logical_and(rays[3]<8400.,\\\n rays[3]>8300.))\n ang = anal.grazeAngle(rays)\n tran.reflect(rays)\n\n #Go back to nominal node reference frame and down to focus\n rays = tran.applyT(rays,coords,inverse=True)\n\n if f is None:\n f = -surf.focusI(rays)\n print f\n else:\n tran.transform(rays,0,0,-f,0,0,0)\n surf.flat(rays)\n\n if rrays is True:\n return rays\n \n return anal.hpd(rays)/f * 180/np.pi * 60.**2", "def identify_leaflets(u, time_ts):\n z = u.select_atoms(\"all\").center_of_geometry()[2]\n COM_z= np.array([0,0,z]) #defines the global midplane position along z\n x, y, z = u.trajectory.ts.triclinic_dimensions[0][0], u.trajectory.ts.triclinic_dimensions[1][1], u.trajectory.ts.triclinic_dimensions[2][2]\n box = np.array([x, y, z, 90, 90, 90]) \n ### Determining side of the bilayer CHOL belongs to in this frame\n lipid1 = 'CHL'\n lipid2 = 'DLIP'\n lipid3 = 'SSM'\n lipid4 = 'DSPC'\n \n lpd1_atoms = u.select_atoms('resname %s and name O2'%lipid1) \n lpd2_atoms = u.select_atoms('resname %s and name P '%lipid2) \n lpd3_atoms = u.select_atoms('resname %s and name P '%lipid3) \n lpd4_atoms = u.select_atoms('resname %s and name P '%lipid4)\n \n num_lpd2 = lpd2_atoms.n_atoms\n num_lpd3 = lpd3_atoms.n_atoms\n num_lpd4 = lpd4_atoms.n_atoms \n # atoms in the upper leaflet as defined by insane.py or the CHARMM-GUI membrane builders\n # select cholesterol headgroups within 1.5 nm of lipid headgroups in the selected leaflet\n # this must be done because CHOL rapidly flip-flops between leaflets\n # so we must assign CHOL to each leaflet at every time step, and in large systems\n # with substantial membrane undulations, a simple cut-off in the z-axis just will not cut it\n if side == 'up':\n lpd2i = lpd2_atoms[:int((num_lpd2)/2)]\n lpd3i = lpd3_atoms[:int((num_lpd3)/2)]\n lpd4i = lpd4_atoms[:int((num_lpd4)/2)]\n \n\n lipids = lpd2i + lpd3i + lpd4i \n\n ns_lipids = NS.AtomNeighborSearch(lpd1_atoms, box=box) \n lpd1i = ns_lipids.search(lipids,15.0) #1.5 nm\n leaflet = lpd1i + lpd2i + lpd3i + lpd4i \n\n elif side == 'down':\n lpd2i = lpd2_atoms[int((num_lpd2)/2):]\n lpd3i = lpd3_atoms[int((num_lpd3)/2):]\n lpd4i = lpd4_atoms[int((num_lpd4)/2):]\n\n lipids = lpd2i + lpd3i + lpd4i #+ lpd3i\n \n ns_lipids = NS.AtomNeighborSearch(lpd1_atoms, box=box)\n lpd1i = ns_lipids.search(lipids,15.0) # 1.5nm\n leaflet = lpd1i + lpd2i + lpd3i+ lpd4i \n return lpd1i, lpd2i, lpd3i, lpd4i, COM_z, box, leaflet", "def __init__(self, phase, lc, err, airmass, crpa, common_mode_array, \\\n psf_width, psf_width_ratio, psf_yposition, shift_position,\n x1_array, prior_string, priorsigmas):\n self.phase = phase\n self.lc = lc\n self.err= err\n self.airmass = airmass\n self.crpa = crpa\n self.common_mode_array = common_mode_array\n self.psf_width = psf_width\n self.psf_width_ratio = psf_width_ratio\n self.psf_yposition = psf_yposition\n self.shift_position = shift_position\n self.x1_array = x1_array ## generic additional correction.\n self.prior_string = prior_string ## prior shape, 'gaus', 'tophat'\n self.priorsigmas = priorsigmas", "def ml_loop(side: str):\n\n # === Here is the execution order of the loop === #\n # 1. 
Put the initialization code here\n ball_served = False\n blocker_last_x = 0\n\n class Pred:\n pred = 100\n blocker_pred_x = 0\n last_command = 0\n blocker_vx = 0\n\n \n def move_to(player, pred) : #move platform to predicted position to catch ball \n if player == '1P':\n if scene_info[\"platform_1P\"][0]+20 > (pred-10) and scene_info[\"platform_1P\"][0]+20 < (pred+10): return 0 # NONE\n elif scene_info[\"platform_1P\"][0]+20 <= (pred-10) : return 1 # goes right\n else : return 2 # goes left\n else :\n if scene_info[\"platform_2P\"][0]+20 > (pred-10) and scene_info[\"platform_2P\"][0]+20 < (pred+10): return 0 # NONE\n elif scene_info[\"platform_2P\"][0]+20 <= (pred-10) : return 1 # goes right\n else : return 2 # goes left\n\n def ml_loop_for_1P(): \n # ball slicing\n if scene_info[\"ball_speed\"][1] > 0 and (scene_info[\"ball\"][1]+scene_info[\"ball_speed\"][1]) >= 415 and Pred.last_command == 0:\n print(\"------\")\n ball_x = scene_info[\"ball\"][0]\n ball_y = scene_info[\"ball\"][1]\n ball_vx = scene_info[\"ball_speed\"][0]\n ball_slice_vx = scene_info[\"ball_speed\"][0]+np.sign(scene_info[\"ball_speed\"][0])*3\n ball_vy = scene_info[\"ball_speed\"][1] \n blocker_x = scene_info['blocker'][0] + Pred.blocker_vx\n \n y = abs((415 - ball_y) // ball_vy)\n pred_ball_1P = ball_x + ball_vx * y\n\n y = abs((415 - 260) // ball_vy)\n pred_ball_blocker = pred_ball_1P + ball_slice_vx * y\n bound = pred_ball_blocker // 200 # Determine if it is beyond the boundary\n if (bound > 0): # pred > 200 # fix landing position\n if (bound%2 == 0) : \n pred_ball_blocker = pred_ball_blocker - bound*200 \n else :\n pred_ball_blocker = 200 - (pred_ball_blocker - 200*bound)\n elif (bound < 0) : # pred < 0\n if (bound%2 ==1) :\n pred_ball_blocker = abs(pred_ball_blocker - (bound+1) *200)\n else :\n pred_ball_blocker = pred_ball_blocker + (abs(bound)*200)\n \n y = abs((415 - 260) // ball_vy)\n Pred.blocker_pred_x = blocker_x + Pred.blocker_vx * y \n if Pred.blocker_pred_x < 0: Pred.blocker_pred_x = abs(Pred.blocker_pred_x)\n elif Pred.blocker_pred_x > 170: Pred.blocker_pred_x = 170 - (Pred.blocker_pred_x - 170)\n \n if pred_ball_blocker >= Pred.blocker_pred_x-10 and pred_ball_blocker < Pred.blocker_pred_x+40:\n print(\"slice will hit blicker\")\n # don't slice \n # use origin ball vx to predict will hit blocker or not\n # if will hit blicker let ball go reverse direction\n y = abs((415 - 260) // ball_vy)\n pred_ball_blocker = pred_ball_1P + ball_vx * y\n bound = pred_ball_blocker // 200 # Determine if it is beyond the boundary\n if (bound > 0): # pred > 200 # fix landing position\n if (bound%2 == 0) : \n pred_ball_blocker = pred_ball_blocker - bound*200 \n else :\n pred_ball_blocker = 200 - (pred_ball_blocker - 200*bound)\n elif (bound < 0) : # pred < 0\n if (bound%2 ==1) :\n pred_ball_blocker = abs(pred_ball_blocker - (bound+1) *200)\n else :\n pred_ball_blocker = pred_ball_blocker + (abs(bound)*200)\n\n if pred_ball_blocker >= Pred.blocker_pred_x-10 and pred_ball_blocker < Pred.blocker_pred_x+40:\n print(\"will hit blocker, hit reversed direction\")\n if scene_info[\"ball_speed\"][0] > 0: return 2\n else: return 1\n else: \n print(\"will not hit blicker, do nothing\")\n return 0\n else:\n # slice\n print(\"slice will not hit blocker\")\n if scene_info[\"ball_speed\"][0] > 0: return 1\n else: return 2\n\n elif scene_info[\"ball_speed\"][1] > 0 : # 球正在向下 # ball goes down\n x = ( scene_info[\"platform_1P\"][1]-scene_info[\"ball\"][1] ) // scene_info[\"ball_speed\"][1] # 幾個frame以後會需要接 # x means how many frames before 
catch the ball\n Pred.pred = scene_info[\"ball\"][0]+(scene_info[\"ball_speed\"][0]*x) # 預測最終位置 # pred means predict ball landing site \n bound = Pred.pred // 200 # Determine if it is beyond the boundary\n if (bound > 0): # pred > 200 # fix landing position\n if (bound%2 == 0) : \n Pred.pred = Pred.pred - bound*200 \n else :\n Pred.pred = 200 - (Pred.pred - 200*bound)\n elif (bound < 0) : # pred < 0\n if (bound%2 ==1) :\n Pred.pred = abs(Pred.pred - (bound+1) *200)\n else :\n Pred.pred = Pred.pred + (abs(bound)*200)\n return move_to(player = '1P',pred = Pred.pred)\n \n else : # 球正在向上 # ball goes up\n return move_to(player = '1P',pred = 100)\n\n\n\n def ml_loop_for_2P(): # as same as 1P\n if scene_info[\"ball_speed\"][1] > 0 : \n return move_to(player = '2P',pred = 100)\n else : \n x = ( scene_info[\"platform_2P\"][1]+30-scene_info[\"ball\"][1] ) // scene_info[\"ball_speed\"][1] \n pred = scene_info[\"ball\"][0]+(scene_info[\"ball_speed\"][0]*x) \n bound = pred // 200 \n if (bound > 0):\n if (bound%2 == 0):\n pred = pred - bound*200 \n else :\n pred = 200 - (pred - 200*bound)\n elif (bound < 0) :\n if bound%2 ==1:\n pred = abs(pred - (bound+1) *200)\n else :\n pred = pred + (abs(bound)*200)\n return move_to(player = '2P',pred = pred)\n\n # 2. Inform the game process that ml process is ready\n comm.ml_ready()\n\n # 3. Start an endless loop\n while True:\n # 3.1. Receive the scene information sent from the game process\n scene_info = comm.recv_from_game()\n\n # 3.2. If either of two sides wins the game, do the updating or\n # resetting stuff and inform the game process when the ml process\n # is ready.\n if scene_info[\"status\"] != \"GAME_ALIVE\":\n # Do some updating or resetting stuff\n ball_served = False\n\n # 3.2.1 Inform the game process that\n # the ml process is ready for the next round\n comm.ml_ready()\n continue\n\n # 3.3 Put the code here to handle the scene information\n\n # 3.4 Send the instruction for this frame to the game process\n if not ball_served:\n comm.send_to_game({\"frame\": scene_info[\"frame\"], \"command\": \"SERVE_TO_LEFT\"})\n blocker_last_x = scene_info[\"blocker\"][0]\n Pred.last_command = 0\n ball_served = True\n else:\n if side == \"1P\":\n Pred.blocker_vx = scene_info[\"blocker\"][0] - blocker_last_x\n if scene_info[\"blocker\"][0] == 0: Pred.blocker_vx = 5\n elif scene_info[\"blocker\"][0] == 170: Pred.blocker_vx = -5\n command = ml_loop_for_1P()\n blocker_last_x = scene_info[\"blocker\"][0]\n Pred.last_command = command\n else:\n command = ml_loop_for_2P()\n\n if command == 0:\n comm.send_to_game({\"frame\": scene_info[\"frame\"], \"command\": \"NONE\"})\n elif command == 1:\n comm.send_to_game({\"frame\": scene_info[\"frame\"], \"command\": \"MOVE_RIGHT\"})\n else :\n comm.send_to_game({\"frame\": scene_info[\"frame\"], \"command\": \"MOVE_LEFT\"})", "def create_tuning_functions(self):\r\n\t\tmotion_tuning = np.zeros((par['num_motion_tuned'], par['num_receptive_fields'], par['num_motion_dirs']), dtype=np.float32)\r\n\t\tfix_tuning = np.zeros((par['num_fix_tuned'], par['num_receptive_fields']), dtype=np.float32)\r\n\t\trule_tuning = np.zeros((par['num_rule_tuned'], par['num_rules']), dtype=np.float32)\r\n\r\n\t\t# generate list of prefered directions\r\n\t\t# dividing neurons by 2 since two equal groups representing two modalities\r\n\t\tpref_dirs = np.arange(0,360,360/(par['num_motion_tuned']//par['num_receptive_fields'])).astype(np.float32)\r\n\r\n\t\t# generate list of possible stimulus directions\r\n\t\tstim_dirs = 
np.arange(0,360,360/par['num_motion_dirs']).astype(np.float32)\r\n\r\n\t\tfor n in range(par['num_motion_tuned']//par['num_receptive_fields']):\r\n\t\t\tfor i in range(len(stim_dirs)):\r\n\t\t\t\tfor r in range(par['num_receptive_fields']):\r\n\t\t\t\t\td = np.cos((stim_dirs[i] - pref_dirs[n])/180*np.pi)\r\n\t\t\t\t\tn_ind = n+r*par['num_motion_tuned']//par['num_receptive_fields']\r\n\t\t\t\t\tmotion_tuning[n_ind,r,i] = par['tuning_height']*np.exp(par['kappa']*d)/np.exp(par['kappa'])\r\n\r\n\t\tfor n in range(par['num_fix_tuned']):\r\n\t\t\tfor i in range(par['num_receptive_fields']):\r\n\t\t\t\tif n%par['num_receptive_fields'] == i:\r\n\t\t\t\t\tfix_tuning[n,i] = par['tuning_height']\r\n\r\n\t\tneurons_per_rule = par['num_rule_tuned']//par['num_rules']\r\n\t\tfor n in range(par['num_rule_tuned']):\r\n\t\t\tfor i in range(par['num_rules']):\r\n\t\t\t\tif n in range(i*neurons_per_rule, (i+1)*neurons_per_rule):\r\n\t\t\t\t\trule_tuning[n,i] = par['tuning_height']\r\n\r\n\r\n\t\treturn motion_tuning, fix_tuning, rule_tuning", "def method2(self):\n cres=np.zeros(self.NL,dtype=float) # List of invariants\n # The U matrices from Fukui's method; storage...\n Ux_loc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy_loc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n \n for il in range(self.NL):\n # ... and calculation of U matrices for each layer\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.LDM[il,ix ,iy ,:,:]\n mat2=self.LDM[il,(ix%self.kS.Nx)+1 ,iy ,:,:]\n mat3=self.LDM[il,ix ,(iy%self.kS.Ny)+1 ,:,:]\n \n Ux_loc[ix,iy]=np.dot(np.conj(mat1.T),mat2)[1,1]\n Uy_loc[ix,iy]=np.dot(np.conj(mat1.T),mat3)[1,1]\n \n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux_loc[ix,iy]*Uy_loc[ix+1,iy]/Ux_loc[ix,iy+1]/Uy_loc[ix,iy])\n cres[il]+=(ftemp/2./pi/1j).real # Layer specific topological invariant\n \n return cres", "def phase_Venus_2(alpha):\n phase = 10.**(-0.4*( - 2.81914e-00*alpha + 8.39034e-03*alpha**2.))\n #1 Scale Properly\n h1 = phase_Venus_1(163.7) - 0. #Total height desired over range\n h2 = 10.**(-0.4*( - 2.81914e-00*163.7 + 8.39034e-03*163.7**2.)) - 10.**(-0.4*( - 2.81914e-00*179. + 8.39034e-03*179.**2.))\n phase = phase * h1/h2 #Scale so height is proper\n #2 Lateral movement to make two functions line up\n difference = phase_Venus_1(163.7) - h1/h2*(10.**(-0.4*( - 2.81914e-00*163.7 + 8.39034e-03*163.7**2.)))\n phase = phase + difference\n\n # + \n #-(- 2.81914e-00*163.7 + 8.39034e-03*163.7**2.)\n # - 1.\n return phase", "def process_lane_label_apollo_sim_3D(label_file):\n\n with open(label_file, 'r') as jf:\n lane_data = json.load(jf)\n\n centerlines_in = lane_data['laneList']\n lanelines_in = lane_data['laneBoundaryList']\n\n # register each lane by its id for access\n centerline_dict = {lane['id']: lane for lane in centerlines_in}\n centerline2del = {lane['id']: 0 for lane in centerlines_in}\n\n laneline_dict = {lane['id']: lane for lane in lanelines_in}\n laneline2del = {lane['id']: 0 for lane in lanelines_in}\n\n if merge:\n \"\"\"\n This merging algorithms serves the specific purpose of the 3D LaneNet representation where the sharing portion\n are duplicated into two lanes when dealing with many-to-one and one-to-many connectivity.\n \n The algorithm is a two-iteration solution. The first iteration solves all the one-to-one connections. 
A \n recursive function is applied to keep merging second segment with its successors, delete second segment's id \n from first segment's successor list, and add the merged successor lane's successor id to the list. The second \n iteration deals will one-to-many connection case. The first segment will be marked and to delete, and the second\n segment is augmented with the first segment at front.\n \"\"\"\n\n # iter1: merge centerlines based on successorList: all the modification applies directly back to centerlines_in and lanelines_in\n for id, centerlane in centerline_dict.items():\n merge_segments_recursive(centerlane, centerline_dict, laneline_dict, centerline2del, laneline2del)\n\n # iter2: handle 1 to 2 case by extending the second and marking the first segment to delete\n for id, centerlane in centerline_dict.items():\n if len(centerlane['successorList']) > 1:\n for second_id in centerlane['successorList']:\n centerlane2 = centerline_dict[second_id]\n # this condition could be removed, but kept for safe\n if -0.01 <= centerlane2['pos3DInCameraList'][0]['z'] - centerlane['pos3DInCameraList'][-1]['z'] < 0.01:\n centerlane2['pos3DInCameraList'] = centerlane['pos3DInCameraList'] + centerlane2['pos3DInCameraList']\n centerline2del[id] = 1\n\n # merge associated lanelines\n if centerlane['leftBoundaryId'] in laneline_dict and centerlane2['leftBoundaryId'] in laneline_dict:\n left_laneline = laneline_dict[centerlane['leftBoundaryId']]\n left_laneline2 = laneline_dict[centerlane2['leftBoundaryId']]\n # only merge those have not been dealt from other centerlane associations\n if -0.01 <= left_laneline2['pos3DInCameraList'][0]['z'] - left_laneline['pos3DInCameraList'][-1]['z'] < 0.01:\n left_laneline2['pos3DInCameraList'] = left_laneline['pos3DInCameraList'] + left_laneline2['pos3DInCameraList']\n laneline2del[left_laneline['id']] = 1\n\n if centerlane['rightBoundaryId'] in laneline_dict and centerlane2['rightBoundaryId'] in laneline_dict:\n right_laneline = laneline_dict[centerlane['rightBoundaryId']]\n right_laneline2 = laneline_dict[centerlane2['rightBoundaryId']]\n # only merge those have not been dealt from other centerlane associations\n if -0.01 <= right_laneline2['pos3DInCameraList'][0]['z'] - right_laneline['pos3DInCameraList'][-1]['z'] < 0.01:\n right_laneline2['pos3DInCameraList'] = right_laneline['pos3DInCameraList'] + right_laneline2['pos3DInCameraList']\n laneline2del[right_laneline['id']] = 1\n\n # convert to output format\n centerlanes_out = []\n for i, centerlane_in in enumerate(centerlines_in):\n if centerline2del[centerlane_in['id']] or centerlane_in['type'] == 'SHOULDER':\n # add its inner side associated laneline into delete list\n if centerlane_in['pos3DInCameraList'][0]['x'] < 0 and centerlane_in['rightBoundaryId'] in laneline_dict:\n laneline2del[centerlane_in['rightBoundaryId']] = 1\n elif centerlane_in['pos3DInCameraList'][0]['x'] > 0 and centerlane_in['leftBoundaryId'] in laneline_dict:\n laneline2del[centerlane_in['leftBoundaryId']] = 1\n continue\n centerlane_out = []\n for pt_3d in centerlane_in['pos3DInCameraList']:\n centerlane_out.append([pt_3d['x'], pt_3d['y'], pt_3d['z']])\n centerlanes_out.append(centerlane_out)\n\n lanelines_out = []\n for i, laneline_in in enumerate(lanelines_in):\n if laneline2del[laneline_in['id']]:\n continue\n laneline_out = []\n for pt_3d in laneline_in['pos3DInCameraList']:\n laneline_out.append([pt_3d['x'], pt_3d['y'], pt_3d['z']])\n lanelines_out.append(laneline_out)\n\n return centerlanes_out, lanelines_out, 
lane_data['cameraHeight'], lane_data['cameraPitch']" ]
[ "0.5802384", "0.5704981", "0.553651", "0.55297977", "0.55000675", "0.5490818", "0.53914285", "0.5377671", "0.5346241", "0.53404874", "0.5332665", "0.52918446", "0.52571434", "0.52550757", "0.52523685", "0.5244776", "0.5243008", "0.5238628", "0.5237757", "0.52282685", "0.5177067", "0.51749367", "0.51686555", "0.51682466", "0.5164786", "0.516114", "0.51610285", "0.5156842", "0.5138377", "0.5136782", "0.51311", "0.51296794", "0.512832", "0.5106441", "0.50905824", "0.5084782", "0.5076301", "0.5073889", "0.5036322", "0.50278974", "0.5025582", "0.5025124", "0.50182325", "0.50132823", "0.50106114", "0.50093406", "0.500764", "0.4999438", "0.4993044", "0.49859613", "0.49821734", "0.49730816", "0.49718782", "0.49676332", "0.49666318", "0.49572447", "0.49501634", "0.49462396", "0.49364373", "0.49265158", "0.49235973", "0.49130124", "0.49084195", "0.49046415", "0.49034503", "0.49000633", "0.48952493", "0.48903108", "0.48880905", "0.4888081", "0.48854077", "0.4883648", "0.48804864", "0.48761967", "0.48714182", "0.48705345", "0.48688695", "0.48662752", "0.48644176", "0.48630738", "0.4858476", "0.48519754", "0.48504937", "0.4843973", "0.4833736", "0.48252857", "0.48235562", "0.48196116", "0.48192087", "0.48191127", "0.4812764", "0.48060098", "0.48059514", "0.4802651", "0.47983813", "0.47948897", "0.47901818", "0.4789338", "0.47852713", "0.4779062" ]
0.7835292
0
Returns value of card. Always returns 11 for Ace.
def get_value(self):
    if self.rank == 'A':
        return 11
    elif self.rank in ['J', 'Q', 'K']:
        return 10
    else:
        return int(self.rank)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_card_value(self, card):\n if card >= 10:\n return 10\n if card == 1:\n return 11\n return card", "def value(self, card):\n return self.valores[self.deck.index(card)]", "def card_value (card):\r\n value = card[0]\r\n if value in ['Jack','Queen','King']:\r\n return 10\r\n if value in [2,3,4,5,6,7,8,9,10]:\r\n return value\r\n else:\r\n raise 'CardValueError'", "def get_card_val(card):\n\n if card == '1':\n return 1\n if card == '2':\n return 2\n if card == '3':\n return 3\n else:\n return 4", "async def get_card_value(card):\n return ex.first_result(await ex.conn.fetchrow(\"SELECT value FROM blackjack.cards WHERE id = $1\", card))", "def get_value(self):\n \n value = 0\n ace = False\n\n for card in self.hand:\n value += VALUES[card.get_rank()]\n \n if (card.get_rank() == 'A'):\n ace = True\n \n if not ace:\n return value\n else:\n if (value + 10) <= 21:\n return (value + 10)\n else:\n return value", "def get_card (self, card):\n\t\treturn self._card", "def get_card(self):\n\n card = random.randint(1,13)\n return card", "def get_value(self):\r\n value, aces = 0, 0\r\n for card in self.hand:\r\n value += VALUES[card.get_rank()]\r\n # Keep track of the aces in Hand\r\n if card.get_rank() == \"A\":\r\n aces += 1\r\n if aces >= 1 and value + 10 <= 21:\r\n value += 10\r\n return value", "def get_card(self):\n return self.card", "def aces_high(card):\n if isinstance(card, Value):\n if card == Value.Ace:\n return 14\n return card.value\n\n if card.joker:\n return 15\n if card.value == Value.Ace:\n return 14\n return card.value.value", "def card(self):\n return self.cdb.name_to_card[self.card_name]", "def define_card_value(char):\n if char == '2':\n return Value.TWO\n elif char == '3':\n return Value.THREE\n elif char == '4':\n return Value.FOUR\n elif char == '5':\n return Value.FIVE\n elif char == '6':\n return Value.SIX\n elif char == '7':\n return Value.SEVEN\n elif char == '8':\n return Value.EIGHT\n elif char == '9':\n return Value.NINE\n elif char == 'T':\n return Value.TEN\n elif char == 'J':\n return Value.JACK\n elif char == 'Q':\n return Value.QUEEN\n elif char == 'K':\n return Value.KING\n elif char == 'A':\n return Value.ACE\n else:\n return Value.UNDEFINED", "def take_card(self, card_color=None):\r\n Card = self.deck.take_card(card_color)\r\n return Card.value if Card.color == Color.BLACK else Card.value * -1", "def get_value(self):\n global VALUES\n hand_value = 0\n has_ace = False\n\n for card in self.hand:\n v = VALUES[card.get_rank()]\n hand_value += v\n if card.get_rank() is 'A':\n has_ace = True\n\n if not has_ace:\n return hand_value\n else:\n if hand_value + 10 <= 21:\n return hand_value + 10\n else:\n return hand_value", "def get_value(self):\n bj_rankings = {'Ace': 11, 'King': 10, 'Queen': 10, 'Jack': 10,\n 10: 10, 9: 9, 8: 8, 7: 7, 6: 6, 5: 5, 4: 4, 3: 3, 2: 2}\n value = 0\n for card in self.cards:\n value += bj_rankings[card.rank]\n\n if value > 21:\n bj_rankings['Ace'] = 1\n value = 0\n for card in self.cards:\n value += bj_rankings[card.rank]\n return value", "def get_card_info(card):\n result = ((card-1)/13 + 1, card - ((card-1)/13)*13)\n return result", "def card_balance(self):\n return self._card_balance", "def BJValue(self):\r\n #if the face value of a card is greater or equals to 10\r\n if self.rank >= 10:\r\n #count the value as 10\r\n return 10\r\n #if the face value of a card is less than 10\r\n else:\r\n #return the face value of the card\r\n return self.rank", "def blackjackValue(self):\n NUMBERRANKS = [\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", 
\"9\", \"10\"]\n FACECARDS = [\"jack\", \"queen\", \"king\"]\n ACE = [\"ace\"]\n if self.rank in NUMBERRANKS:\n return int(self.rank)\n elif self.rank in FACECARDS:\n return 10\n elif self.rank in ACE:\n return 11", "def card_currency(self):\n return self._card_currency", "def card(self, c=None):\n if c: self._card = c\n return self._card", "def get_value(self):\n #Finds all of the values in the cards\n score_list=[Card.get_value(card) for card in self.cards]\n #Sums the scores\n if self.num_cards() > 0:\n total_score=reduce((lambda x,y: x+y),score_list)\n return total_score\n else:\n return 0", "def get_card_str(self, card):\n card_str = str(card)\n if card == 11:\n card_str = \"Jack\"\n if card == 12:\n card_str = \"Queen\"\n if card == 13:\n card_str = \"King\"\n if card == 1:\n card_str = \"Ace\"\n \n return card_str", "def total(self):\n for card in self.cards:\n if not card.value:\n return 0\n t = 0\n for card in self.cards:\n t += card.value\n contains_ace = False\n for card in self.cards:\n if card.value == BJ_Card.ACE_VALUE:\n contains_ace = True\n if contains_ace and t <= 11:\n t += 10\n return t", "def deal_card(self):\n return self._deal(1)[0]", "def get_card(self):\n if self.card_suit in self.RED_SUITS:\n color = 'red'\n else:\n color = 'blue'\n\n return colored(self.card_name, 'yellow') + colored(self.card_suit,\n color)", "def ac(self):\n if self.armor:\n return self.armor.ac\n return 10 + self.dexterity", "def get_credit_card_number(self):\n\t\tif len(self.credit_card_number) == 16:\n\t\t\treturn self.credit_card_number\n\t\tr(400, {\"message\" : \"please provide the amount to process\"})\n\t\treturn", "def getCardData(self):\n \n return self._cardData", "def get_card_number():\n\n return get_or_append_details('card_number', \"Please enter your credit card number\")", "def read_card(name_card):\n\n card=Cards.Card(name_card)\n return card.info", "def action_peek_cards(self) -> int:\n for card in self.house.hand.cards:\n if not card.is_open:\n return int(card.value)", "def balance(self, card_number):\n database_cursor.execute(f\"SELECT balance FROM card WHERE number = {card_number};\")\n return database_cursor.fetchone()[0]", "def hand_value(hand):\n val = 0 \n for card in hand:\n val += card.value\n\n return val", "def ace_hand_value(ace_count, hand_value):\r\n #case1, the case where the Ace in question is worth 11 points,\r\n # doesn't reduce 11 to 10 in order to be more clear about where these\r\n # values are coming from. ace_count is reduced by 1 to offset 11 being\r\n # counted separately. 
\r\n case1 = hand_value + 11 + (ace_count - 1)\r\n if case1 <= 21:\r\n return case1\r\n \r\n #Implied \"if case1 > 21:\"\r\n #case2 is the case where the Ace in question is worth 1 point.\r\n case2 = hand_value + ace_count\r\n return case2", "def get_value(self, character):\n return self.value", "def deal_cards(self):\n self.card = random.randint(1, 13)\n return self.card", "def get_value(self):\n if self.name in ['1','2','3','4','5','6','7','8', '9', '10']:\n return int(self.name)\n if self.name in ['J','Q','K']:\n return 10\n if self.name == 'A':\n return 1", "def card(n):\r\n assert type(n) == int and n > 0 and n <= 13, \"Bad card n\"\r\n specials = {1: 'A', 11: 'J', 12: 'Q', 13: 'K'}\r\n return specials.get(n, str(n))", "def card_data(self) -> CardEmbossing:\n return self._card_data", "def get_amount(self): \n return len(self.get_cards())", "def test_card_value(mock_card):\n assert mock_card.value == 1", "def getCardNumber(self,message):\n card = re.findall(Analyzer.rgxCard,message.lower())\n return card[0]", "def get_card_product(self):\n\n return self.card_product", "def validate_card():\r\n print(\"Please insert your card\")\r\n card = int(input(\"Please enter 1 if you entered your card\"))\r\n return card", "def get_balance(card):\n data = {\n \"Card.Number\": card[0],\n \"Card.Pin\": card[1],\n }\n\n response = requests.post(BALANCE_URL, data=data, headers=HEADERS)\n if response.status_code == 200:\n match = BALANCE_RE.search(response.text)\n if match:\n return float(match.group(1))", "def get_score(self, card_index: int = 0) -> int:\n return self.get_score_list[card_index]", "def get_suit(self):\r\n return self.suit", "def score(self) -> int:\n card_values = {\n '0': 0,\n '1': 1,\n '2': 2,\n '3': 3,\n '4': 4,\n '5': 5,\n '6': 6,\n '7': 7,\n '8': 8,\n '9': 9,\n '10': 10,\n 'JACK': 10,\n 'QUEEN': 10,\n 'KING': 10,\n 'ACE': 11}\n hand_value = []\n for i in self.cards:\n hand_value.append(card_values[i.value])\n while sum(hand_value) > 21 and 11 in hand_value:\n for i, j in enumerate(hand_value):\n if j == 11:\n hand_value[i] = 1\n break\n else:\n pass\n return sum(hand_value)", "def get_value(self, character):\n return character.numbers[self.item]", "def get_cash(self):\r\n return self.cash", "def get_card(self, suit, face):\n for card in self.deck:\n if card.suit == suit and card.value == face:\n return card", "def get_card(self, slot):\n return self._starting_card[slot]", "def calculate_points(hand): \r\n hand_value = 0\r\n ace_count = 0 \r\n \r\n #Finds value of non-Ace cards, and counts number of Aces.\r\n for card in hand:\r\n if card[0] == 'Ace':\r\n ace_count += 1\r\n else:\r\n # Calls card_value function to evaluate the card.\r\n hand_value += card_value(card) \r\n \r\n #Ace card present\r\n if ace_count > 0:\r\n return ace_hand_value(ace_count, hand_value)\r\n \r\n #Implied \"if ace_count == 0:\"\r\n return hand_value", "def sum_cards(self):\n has_ace = False\n sum = 0\n\n # Add up players cards\n for card in self.cards:\n if card.card_value == \"ace\":\n has_ace = True\n sum += card.game_value\n\n # Handle case where ace plays low\n if sum > 21 and has_ace:\n sum -= 10\n\n return sum", "def card_output():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n return random.choice(cards)", "def getCash(self) -> int:\n return self.state[CASH]", "def card_balance_in_card_currency(self):\n return self._card_balance_in_card_currency", "def __str__(self):\n return self.card_no", "def __repr__(self):\n return f\"Card({self.face}, {self.value}, {self.suit})\"", "def 
hand_value(self):\n return deck.bj_hand_value(self.hand1)", "def testCard(self):\n # test1\n cardObj1 = Card('A','d')\n self.assertEquals(1,cardObj1.get_rank())\n self.assertEquals('d',cardObj1.get_suit())\n # test2\n cardObj2 = Card('J','d')\n self.assertEquals(10,cardObj2.get_rank())\n # test3\n cardObj3 = Card(5,'d')\n self.assertEquals(5,cardObj3.get_rank())", "def blackjack_result(cards):\n sum = 0\n a_cards = 0\n dictionary = {\n '2': 2,\n '3': 3,\n '4': 4,\n '5': 5,\n '6': 6,\n '7': 7,\n '8': 8,\n '9': 9,\n 'T': 10,\n 'J': 10,\n 'Q': 10,\n 'K': 10,\n }\n for card in cards.split():\n if card in dictionary:\n sum = sum + dictionary[card]\n elif card == 'A':\n a_cards = a_cards + 1\n\n if a_cards > 0:\n for i in range(a_cards):\n if a_cards > 1:\n sum = sum + 1\n a_cards = a_cards - 1\n else:\n if sum + 11 < 22:\n sum = sum + 11\n else:\n sum = sum + 1\n\n return sum", "def test_value(self):\n hand = self._hand\n cards = [BjCard('clubs', '10'), BjCard('diamonds', 'A')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.value, 21)", "def controller_card(self):\n\n model = ct.c_wchar_p()\n self.lib.GetControllerCardModel(ct.pointer(model))\n\n return model.value", "def deal_card():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n card = random.choice(cards)\n return card", "def card_type(self):\n return self._card_type", "def get_value(self, device_name):\n return epics.caget(str(device_name))", "def get_value(self, character):\n raise NotImplementedError()", "def calculate_points(card):\n for value in scores.keys():\n if value == card.value:\n card_score = scores[card.value]\n return card_score", "def card_id(self):\n return self._card_type.numbers if self._card_type is not None else self._repport", "def get_cards_sum(self):\n # sum the non-aces first\n s = sum([card.value for card in self.current_hand\n if card.type != Deck.ace_card])\n # find the number of aces in the deck\n aces_in_deck = sum([1 for card in self.current_hand\n if card.type == Deck.ace_card])\n # we now have to add `aces_in_deck` aces to\n # the total sum of the cards\n s = self.__add_aces(s, aces_in_deck)\n\n return s", "def __repr__(self):\n val = self.value\n for k, v in Card.VALUES.iteritems():\n if self.value == v:\n val = k\n return str.format('{0}{1}', val, self.suit)", "def get_value_from_card(card, field):\n with open(card) as f:\n for line in f:\n if field in line.strip():\n return line.strip().split()[-1]", "def deal_card():\r\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\r\n return (random.choice(cards))", "def get_value(self, character):\n return -self.item.get_value(character)", "def get_small_joker_value(deck):\n \n return max(deck) - 1", "def hand_value_check(self, hand):\r\n hand_value = 0\r\n ace = 0\r\n result = []\r\n a = 0\r\n for card in hand: # calculate value of a hand\r\n if card.value < 10:\r\n a = card.value\r\n elif card.value in range(10, 14):\r\n a = 10\r\n elif card.value == 14: # keep track of Aces that may be counted both as 11 and as 1\r\n a = 11\r\n ace += 1\r\n hand_value += a\r\n\r\n if ace > 0: # if hand had aces, return all possible hand values\r\n for i in range(0, ace + 1):\r\n result.append(hand_value)\r\n hand_value -= 10\r\n self.display_hand_val = result\r\n return result\r\n else:\r\n result.append(hand_value)\r\n self.display_hand_val = result\r\n return result", "def _score_hand(hand):\n\n score = 0\n ace = False\n\n for next_card in hand:\n\n # get the value of the card\n card_value = next_card[0]\n\n # if it is an ace and we do not hold 
one, the value is 11 instead of 1\n if card_value == 1 and not ace:\n ace = True\n card_value = 11\n\n # add up the value to the score\n score += card_value\n\n # if we would bust, check if there is an ace and substract\n # 10 from the value (11 - 1). Also, set the ace variable to False.\n if score > 21 and ace:\n score -= 10\n ace = False\n\n return score", "def _update_value(self) -> int:\n\n value_list = [card.value if card.value <= 10 else 10 for card in self]\n hand_value = sum(value_list)\n\n # Checks to see if any Aces can be worth 11 points instead of 1 point\n while value_list.count(1) > 0 and (21 - hand_value) >= 10:\n value_list[value_list.index(1)] = 11\n hand_value = sum(value_list)\n\n self._value = hand_value", "def highCard(self):\n return max(self)", "def num_to_card(card_num):\n card_num = int(card_num)\n if card_num <= 51:\n return (card_num % 13) + 1\n return 0", "def getValue(self) -> int:\n ...", "def getPoints(self):\n count = 0\n for card in self.cards:\n if card.rank > 9:\n count += 10\n elif card.rank == 1:\n count += 11\n else:\n count += card.rank\n # Deduct 10 if Ace is available and needed as 1\n for card in self.cards:\n if count <= 21:\n break\n elif card.rank == 1:\n count -= 10\n return count", "def getCard(self,id):\n if not self.cardExists(id):\n return None\n return self.cards[id]", "def _translate_card(self):\n if isinstance(self.suit, int):\n\n if self.suit == 0:\n name, self.values = self._assign_names(self.rank)\n self.name = \"{} of spades\".format(name)\n\n elif self.suit == 1:\n name, self.values = self._assign_names(self.rank)\n self.name = \"{} of hearts\".format(name)\n\n elif self.suit == 2:\n name, self.values = self._assign_names(self.rank)\n self.name = \"{} of diamonds\".format(name)\n\n elif self.suit == 3:\n name, self.values = self._assign_names(self.rank)\n self.name = \"{} of clubs\".format(name)\n\n else:\n raise ValueError(\"The integer passed to the method must be 0, 1, 2, 3\")\n\n else:\n raise TypeError(\"The argument for the method must be an integer\")\n\n return self.name, self.values", "def strToCardValue(self, str):\n CARD_REPRESENTATION = {v: k for k, v in Card.ENGLISH_REPRESENTATION.items()}\n return CARD_REPRESENTATION[str]", "def account_balance_in_card_currency(self):\n return self._account_balance_in_card_currency", "def _get_card(self, name: str) -> Dict:", "def convNumToCard(cardNum):\n\n\tcardDict = {14:\"A\", 13:\"K\", 12:\"Q\", 11:\"J\"}\n\n\tif cardNum > 10:\n\t\treturn cardDict[cardNum]\n\telse: return str(cardNum)", "def value(x):\r\n val = 0\r\n ace_count = 0;\r\n for i in range(len(x)):\r\n if x[i] == 1:\r\n ace_count += 1\r\n val += 11\r\n else:\r\n val += x[i]\r\n while val > 21 and ace_count != 0:\r\n val -= 10\r\n ace_count -= 1\r\n return val", "def __update_values(self):\r\n\r\n\t\tv = [0]\r\n\t\thas_ace = False\r\n\r\n\t\t# two values for hands with aces\r\n\t\tfor card in self.cards:\r\n\t\t\tv[0] += card.value\r\n\t\t\tif card.rank == 'Ace':\r\n\t\t\t\thas_ace = True\r\n\r\n\t\t# hand is soft if below 12\r\n\t\tif has_ace:\r\n\t\t\tif v[0] < 12:\r\n\t\t\t\tv.append(v[0] + 10)\r\n\r\n\t\tself.values = v", "def getValue(currency=None):", "def deal_card():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n random_card = random.choice(cards)\n return random_card", "def hand_value_check(self, hand):\r\n hand_value = 0\r\n result = []\r\n a = 0\r\n for card in hand: # calculate value of a hand\r\n if card.value < 10:\r\n a = card.value\r\n elif card.value in range(10, 14):\r\n a = 10\r\n elif 
card.value == 14: # keep track of Aces that may be counted both as 11 and as 1\r\n a = 11\r\n hand_value += a\r\n\r\n result.append(hand_value)\r\n self.display_hand_val = result\r\n return result", "def get_value(self, character):\n out = 1\n for i in self.items:\n out *= i.get_value(character)\n return out", "def get_card(self):\n return self.deck.pop()", "def get_value(self, character):\n return sum(i.get_value(character) for i in self.items)", "def calculate_value(self, hand):\n global FACE_CARDS\n #could refactor the 2 hand possiblities into methods of a Dealer and Player Class\n if hand == \"player\":\n if self.player_hand[-1].value in FACE_CARDS:\n self.player_value += 10\n elif self.player_hand[-1].value == \"A\":\n self.player_value += 11\n self.player_ace_count += 1\n else:\n self.player_value += int(self.player_hand[-1].value)\n\n if self.player_value > 21:\n if self.player_ace_count > self.player_almost_bust:\n #To prevent a Bust, your Ace became a one\n self.player_value -= 10\n self.player_almost_bust += 1\n else:\n self.player_lose()\n elif self.player_value == 21:\n self.blackjack = True\n self.endgame()\n\n elif hand == \"dealer\":\n if len(self.dealer_hand) > 1:\n if self.dealer_hand[-1].value in FACE_CARDS:\n self.dealer_value += 10\n elif self.dealer_hand[-1].value == \"A\":\n self.dealer_value += 11\n self.dealer_ace_count += 1\n else:\n self.dealer_value += int(self.dealer_hand[-1].value)\n\n if self.dealer_value > 21:\n if self.dealer_ace_count > self.dealer_almost_bust:\n #To prevent a Bust, the Dealer's Ace became a one\n self.dealer_value -= 10\n self.dealer_almost_bust += 1\n else:\n self.player_win()\n elif self.dealer_value == 21:\n self.player_lose()" ]
[ "0.8789612", "0.8161618", "0.7929443", "0.7905337", "0.76787597", "0.75404507", "0.7418031", "0.7387034", "0.7376184", "0.7270906", "0.72434753", "0.713611", "0.708218", "0.7063227", "0.6924992", "0.69160146", "0.68489075", "0.6820446", "0.6620286", "0.657642", "0.65416074", "0.6522254", "0.65176064", "0.6503941", "0.64873934", "0.64861053", "0.63984656", "0.63910365", "0.6370247", "0.6368718", "0.6350347", "0.62608176", "0.62114966", "0.6192009", "0.6190115", "0.61645573", "0.61564875", "0.61416644", "0.6134358", "0.6107144", "0.60930675", "0.60702187", "0.60691607", "0.6065586", "0.6063705", "0.6055047", "0.60524917", "0.60260147", "0.60219187", "0.60193425", "0.60183203", "0.6002383", "0.59932667", "0.59909695", "0.5936188", "0.5923628", "0.5894926", "0.5888816", "0.5871006", "0.5860224", "0.58388466", "0.5835805", "0.5826527", "0.5822014", "0.581533", "0.57993186", "0.5777997", "0.5776081", "0.5765643", "0.57583904", "0.5758371", "0.5754913", "0.57524437", "0.5746114", "0.5722896", "0.5703217", "0.56874883", "0.5685932", "0.5681492", "0.5675761", "0.56619537", "0.5659929", "0.56531245", "0.56502676", "0.5642062", "0.56312096", "0.56290746", "0.5609603", "0.56082034", "0.56067646", "0.5599668", "0.5594321", "0.55932134", "0.5592214", "0.55722064", "0.55661416", "0.55660826", "0.5559402", "0.5558395", "0.5551928" ]
0.61869216
35
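A minimal usage sketch for the get_value method documented in this record, showing how a blackjack hand total is typically built on top of it; the Card class and hand_value helper below are illustrative assumptions for this sketch, not part of the dataset.

class Card:
    # Hypothetical card holding a rank string such as 'A', 'K', or '7'.
    def __init__(self, rank):
        self.rank = rank

    def get_value(self):
        # Mirrors the documented behaviour: an Ace always counts as 11 here.
        if self.rank == 'A':
            return 11
        elif self.rank in ['J', 'Q', 'K']:
            return 10
        else:
            return int(self.rank)


def hand_value(cards):
    # Sum the card values, then demote Aces from 11 to 1 while the hand busts.
    total = sum(card.get_value() for card in cards)
    aces = sum(1 for card in cards if card.rank == 'A')
    while total > 21 and aces > 0:
        total -= 10
        aces -= 1
    return total


print(hand_value([Card('A'), Card('K')]))             # 21
print(hand_value([Card('A'), Card('A'), Card('9')]))  # 21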
Convert a column number into a column letter (3 -> 'C') Right shift the column col_idx by 26 to find column letters in reverse order. These numbers are 1-based, and can be converted to ASCII ordinals by adding 64.
def _get_column_letter(col_idx):
    # these indices correspond to A -> ZZZ and include all allowed
    # columns
    if not 1 <= col_idx <= 18278:
        raise ValueError("Invalid column index {0}".format(col_idx))
    letters = []
    while col_idx > 0:
        col_idx, remainder = divmod(col_idx, 26)
        # check for exact division and borrow if needed
        if remainder == 0:
            remainder = 26
            col_idx -= 1
        letters.append(chr(remainder + 64))
    return ''.join(reversed(letters))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _index_to_column(i, column=''):\n\n # A dictionary of numbers to letters starting at 0, e.g.\n # {0: 'A', 1: 'B' ...}\n num_to_alpha = {k:v for k, v in enumerate(string.ascii_uppercase, 0)}\n # If our index is divisble by 26, we need to get recursive and add\n # additional letters.\n div = i // 26\n if div:\n column = index_to_column(div - 1, column)\n # Combine results in case things got all inception like.\n column = column + num_to_alpha[i % 26]\n\n return column", "def get_column_letter_new(column_index):\n # these indicies correspond to A -> ZZZ and include all allowed\n # columns\n if not 1 <= column_index <= 18278:\n msg = 'Column index out of bounds: %s' % column_index\n raise ValueError(msg)\n letters = []\n while column_index > 0:\n column_index, remainder = divmod(column_index, 26)\n # check for exact division and borrow if needed\n if remainder == 0:\n remainder = 26\n column_index -= 1\n letters.append(chr(remainder+64))\n return ''.join(reversed(letters))", "def num_to_col(n):\n\n # based on https://stackoverflow.com/a/23862195\n string = \"\"\n while n > 0:\n n, remainder = divmod(n - 1, 26)\n string = chr(65 + remainder) + string\n return string", "def reverseCol(input):\n try:\n parsed = chr(input + ord('A'))\n except TypeError:\n raise PositionException, \"Bad input for col; %s\" % input\n if not 0 <= input < CHESS_COLS:\n raise PositionException, \"Col out of range; %d parsed as %s.\" \\\n % (input, parsed)\n return parsed", "def convert_number_to_excel_colname(n):\n\n assert 0 < n <= 256\n\n alphabet = [chr(x) for x in xrange(65, 91)]\n\n if n > 26:\n return '{0}{1}'.format(alphabet[(n/26) - 1], alphabet[(n%26) - 1])\n else:\n return alphabet[(n%26) - 1]", "def getColIdx(self, col):\n try: \n return int(col)\n except:\n return ord(col)-ord('a')", "def convertColumn(cls, column, row = None):\n\n\t\t#Convert Column if needed\n\t\tif (isinstance(column, int)):\n\t\t\t#Check for past Z\n\t\t\tcount = 0\n\t\t\tbonusColumn = \"\"\n\t\t\twhile True:\n\t\t\t\tcount += 1\n\t\t\t\t#Does the ascii letter go past Z? 
If so, create addition letter\n\t\t\t\tif (openpyxl.utils.get_column_letter(count).isupper()):\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tcolumn -= 26\n\t\t\t\t\tbonusColumn = openpyxl.utils.get_column_letter(count)\n\n\t\t\t#Set new Column\n\t\t\tcolumn = bonusColumn + openpyxl.utils.get_column_letter(column)\n\n\t\tif (row is None):\n\t\t\treturn column\n\t\treturn f\"{column}{row}\"", "def getColIdx(self, col):\n try:\n return int(col)\n except:\n return ord(col)-ord('a')", "def column_to_letter(self, pos):\n column_dict = {}\n column_dict[0] = 'a'\n column_dict[1] = 'b'\n column_dict[2] = 'c'\n column_dict[3] = 'd'\n column_dict[4] = 'e'\n column_dict[5] = 'f'\n column_dict[6] = 'g'\n column_dict[7] = 'h'\n column_dict[8] = 'i'\n return column_dict[pos]", "def getColName(self, col):\n try:\n return chr(ord('a') + col)\n except:\n return col", "def getColName(self, col):\n try:\n return chr(ord('a') + col)\n except:\n return col", "def letter_to_column(self, pos):\n column_dict = {}\n column_dict['a'] = 0\n column_dict['b'] = 1\n column_dict['c'] = 2\n column_dict['d'] = 3\n column_dict['e'] = 4\n column_dict['f'] = 5\n column_dict['g'] = 6\n column_dict['h'] = 7\n column_dict['i'] = 8\n return column_dict[pos[0]]", "def letter_to_column(self, pos):\n column_dict = {}\n column_dict['a'] = 0\n column_dict['b'] = 1\n column_dict['c'] = 2\n column_dict['d'] = 3\n column_dict['e'] = 4\n column_dict['f'] = 5\n column_dict['g'] = 6\n column_dict['h'] = 7\n column_dict['i'] = 8\n return column_dict[pos[0]]", "def letter_to_column(self, pos):\n column_dict = {}\n column_dict['a'] = 0\n column_dict['b'] = 1\n column_dict['c'] = 2\n column_dict['d'] = 3\n column_dict['e'] = 4\n column_dict['f'] = 5\n column_dict['g'] = 6\n column_dict['h'] = 7\n column_dict['i'] = 8\n return column_dict[pos[0]]", "def canonicalize_column_index(self, line, col):\n if col < 0:\n col += self.col_lens[line] + 1\n assert col >= 0\n return col", "def calculate_ascii(col, col_count, use_third, first_letter, third_letter):\n if col <= 26:\n # if it's under 26 columns, just use a single letter\n ascii_col = chr(col + 64)\n elif use_third:\n if col_count > 26:\n # first_letter describes the coordinate of what the first letter should be -\n # every 26 iterations, it increases by one to switch the first letter up by one\n first_letter += 1\n # col_count keeps track of what column you're at in the current first_letter iteration\n col_count = 1\n if first_letter > 90:\n third_letter += 1\n first_letter = 65\n ascii_col = chr(third_letter) + chr(first_letter) + chr((col_count + 64))\n\n col_count += 1\n else:\n # if it's over 26 columns, you have to calculate two different letters\n if col_count > 26:\n # first_letter describes the coordinate of what the first letter should be -\n # every 26 iterations, it increases by one to switch the first letter up by one\n first_letter += 1\n # col_count keeps track of what column you're at in the current first_letter iteration\n col_count = 1\n\n ascii_col = chr(first_letter) + chr((col_count + 64))\n\n if ascii_col == 'ZZ':\n use_third = True\n\n col_count += 1\n return ascii_col, col_count, use_third, first_letter, third_letter", "def col_to_num(col_str):\n expn = 0\n col_num = 0\n for char in reversed(col_str):\n col_num += (ord(char) - ord('A') + 1) * (26 ** expn)\n expn += 1\n\n return col_num", "def excel_style(col):\n result = []\n while col:\n col, rem = divmod(col-1, 26)\n result[:0] = LETTERS[rem]\n return ''.join(result)", "def toindex(col, row):\n a2z = 
'ABCDEFGHIJLKMNOPQRSTUVWXYZ'\n\n total = 0\n mult = 0\n for char in col:\n total += (a2z.find(char) + (26 * mult))\n mult += 1\n\n return total, row - 1", "def find_index_column(sheet, name, num):\n for idx in range(1, 26):\n if sheet[chr(idx + 64) + str(num)].value == name:\n index_col = chr(64 + idx)\n break\n return index_col", "def rotate_letter(c, num):\n return chr(((ord(c) - 97) + num) % 26 + 97)", "def index_to_letter(idx):\r\n if 0 <= idx < 20:\r\n return chr(97 + idx)\r\n else:\r\n raise ValueError('A wrong idx value supplied.')", "def indexToPosition(self, col, row):\n columns = \"ABCDEFGH\"\n return columns[col] + str(row + 1)", "def excel_style(row, col):\n quot, rem = divmod(ord(col)-ord('A'), 26)\n return((chr(quot-1 + ord('A')) if quot else '') +\n (chr(rem + ord('A')) + str(row)))", "def index_to_letter(index):\r\n return chr(index + CHAR_A)", "def index_to_letter(index):\r\n return chr(index + CHAR_A)", "def char_from_number(number):\r\n\r\n base = 26\r\n\r\n rval = \"\"\r\n\r\n if number == 0:\r\n rval = 'A'\r\n\r\n while number != 0:\r\n remainder = number % base\r\n new_char = chr(ord('A') + remainder)\r\n rval = new_char + rval\r\n number //= base\r\n\r\n return rval", "def _index_to_char(self, index):\n return chr(index + ord('a'))", "def letter_num(num: int):\n if abs(num) > 26 or num == 0:\n let = ord('a') + 26 - 1\n else:\n let = ord('a') + abs(num) - 1\n return chr(let)", "def rot(c,n):\n if 'a' <= c <= 'z': \n new_ord = ord(c) + n\n if new_ord > ord('z'):\n new_ord = new_ord - 26\n elif 'A' <= c <= 'Z': \n new_ord = ord(c) + n\n if new_ord > ord('Z'):\n new_ord = new_ord - 26\n else: \n new_ord = ord(c)\n return chr(new_ord)", "def shift_column(code, n, s):\n def shift(s, n):\n if n == 0 or len(s) == 1:\n return s\n else:\n return shift(s[-1] + s[:-1], n-1)\n\n if type(code) is not list:\n return code\n else:\n n = int(n)\n s = int(s) % len(code)\n if s > 0 and n < len(code[0]):\n column = select_column(code, n)\n column = shift(column, s)\n for i in range(0, len(column)):\n new = list(code[i])\n new[n] = column[i]\n code[i] = ''.join(new)\n return code\n else:\n return code", "def parseCol(input):\n try:\n parsed = ord(input.upper()) - ord('A')\n except AttributeError:\n raise PositionException, \"Bad input for col; %s\" % input\n if not 0 <= parsed < CHESS_COLS:\n raise PositionException, \"Col out of range; %s parsed as %d.\" \\\n % (input, parsed)\n return parsed", "def get_column_reference(headers, name):\n return chr(ord('A') + headers.index(name))", "def Right(n=1):\n return ESC + str(n) + 'C'", "def symbol_to_col(symbol, size_H):\n num_ascii = ord(symbol)\n\n index = num_ascii % size_H\n sign = 1 if (num_ascii < size_H) else -1\n\n return sign, index", "def columnize(self,\r\n index,\r\n convert=SEMICOLON,\r\n columnchar=UNDERLINE,\r\n undo=False,\r\n counters=False,\r\n only_counter=True):\r\n\r\n if index in self.indexes():\r\n\r\n note_temp = self.get_note(index)\r\n\r\n\r\n\r\n if not undo:\r\n\r\n if not only_counter:\r\n note_temp.text = note_temp.text.replace(convert,\r\n BLANK+columnchar+BLANK)\r\n\r\n\r\n\r\n if '/COL/' not in note_temp.text:\r\n note_temp = COLUMNBEGIN + note_temp\r\n if '/ENDCOL/' not in note_temp.text:\r\n note_temp = note_temp + COLUMNEND\r\n\r\n newtext = EMPTYCHAR\r\n lines = note_temp.text.split(EOL)\r\n endline = len(lines)\r\n for counter, line in enumerate(lines):\r\n if columnchar not in line:\r\n newtext += (POUND+ str(counter) + BLANK + columnchar) \\\r\n *(counters and counter not in [0,endline]) \\\r\n + line + 
BLANK + columnchar + EOL\r\n else:\r\n newtext += (POUND+ str(counter) + BLANK + columnchar)\\\r\n *(counters and counter not in [0,endline])\\\r\n + line + EOL\r\n\r\n else:\r\n newtext = EMPTYCHAR\r\n lines = note_temp.text.split(EOL)\r\n for counter, line in enumerate(lines):\r\n if counter == 0 and '/COL/' in line:\r\n line = line.replace('/COL/',EMPTYCHAR)\r\n if counter == len(lines) and '/ENDCOL/' in line:\r\n line = line.replace('/ENDCOL/',EMPTYCHAR)\r\n newtext += line + EOL\r\n newtext = newtext.replace(BLANK + columnchar + BLANK,convert)\r\n\r\n\r\n\r\n self.softdelete(index)\r\n self.addnew(note_temp.keyset,\r\n newtext,\r\n note_temp.meta,\r\n right_at=True,\r\n ind=index)", "def letter_and_index_conversion(value, grid_size):\n\n number_list = [i for i in range(1, 27)]\n col_dictionary = dict(zip(alphabet_list, number_list))\n\n if type(value) == str and value.upper() in col_dictionary:\n return col_dictionary[value.upper()] # return index\n elif type(value) == int and value > 0 and value <= grid_size:\n letter = list(col_dictionary.keys())[list(col_dictionary.values()).index(value)]\n return letter # return Letter\n else:\n raise ValueError(\"Invalid value please enter a number or a letter\")", "def to_index(self, char):\n return ord(char) - ord(\"A\") - 32", "def prefer_alphabet(i):\n if 0 <= i <= 25:\n return chr(i + 65)\n if 26 <= i <= 51:\n return chr(i + 97 - 26)\n return str(i)", "def ordChar(self, char):\n char = char.upper()\n num = ord(char) - 65\n return num", "def COL(x):\n return (x & 7)", "def COL(x):\n return (x & 7)", "def sort_nicely(col):\n convert = lambda text: int(text) if text.isdigit() else text\n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key[col])]\n return alphanum_key", "def _scale_back_ascii(self, col_idx, input_field, output_field):\n starts = self._coldefs.starts[:]\n spans = self._coldefs.spans\n format = self._coldefs[col_idx].format\n\n # The the index of the \"end\" column of the record, beyond\n # which we can't write\n end = super().field(-1).itemsize\n starts.append(end + starts[-1])\n\n if col_idx > 0:\n lead = starts[col_idx] - starts[col_idx - 1] - spans[col_idx - 1]\n else:\n lead = 0\n\n if lead < 0:\n warnings.warn(\n f\"Column {col_idx + 1} starting point overlaps the previous column.\"\n )\n\n trail = starts[col_idx + 1] - starts[col_idx] - spans[col_idx]\n\n if trail < 0:\n warnings.warn(\n f\"Column {col_idx + 1} ending point overlaps the next column.\"\n )\n\n # TODO: It would be nice if these string column formatting\n # details were left to a specialized class, as is the case\n # with FormatX and FormatP\n if \"A\" in format:\n _pc = \"{:\"\n else:\n _pc = \"{:>\"\n\n fmt = \"\".join([_pc, format[1:], ASCII2STR[format[0]], \"}\", (\" \" * trail)])\n\n # Even if the format precision is 0, we should output a decimal point\n # as long as there is space to do so--not including a decimal point in\n # a float value is discouraged by the FITS Standard\n trailing_decimal = format.precision == 0 and format.format in (\"F\", \"E\", \"D\")\n\n # not using numarray.strings's num2char because the\n # result is not allowed to expand (as C/Python does).\n for jdx, value in enumerate(input_field):\n value = fmt.format(value)\n if len(value) > starts[col_idx + 1] - starts[col_idx]:\n raise ValueError(\n \"Value {!r} does not fit into the output's itemsize of {}.\".format(\n value, spans[col_idx]\n )\n )\n\n if trailing_decimal and value[0] == \" \":\n # We have some extra space in the field for the trailing\n # decimal 
point\n value = value[1:] + \".\"\n\n output_field[jdx] = value\n\n # Replace exponent separator in floating point numbers\n if \"D\" in format:\n output_field[:] = output_field.replace(b\"E\", b\"D\")", "def _chr_ord(x):\n return chr(ord(x))", "def CHAR(table_number):\n return unichr(table_number)", "def chrNum(self, num):\n char = chr(num + 65) \n return char", "def rot(c, n):\n if 'a' <= c <= 'z':\n l = ord(c) + n\n if l > ord('z'):\n l -= 26\n return chr(l)\n elif 'A' <= c <= 'Z':\n l = ord(c) + n\n if l > ord('Z'):\n l -= 26\n return chr(l)\n else:\n return c", "def get_alphabet(number):\n return chr(number + 96)", "def get_alphabet(number):\n return chr(number + 96)", "def fix_column(infile, outfile, colnum):\n with open(infile, mode='r') as fid:\n colnum -= 1 # adj. colnum to account for zero-based indexing\n cread = csv.reader(fid)\n ctr = 0\n\n with open(outfile, mode='w') as new_file:\n cwrite = csv.writer(new_file)\n for row in cread:\n if ctr==0:\n outrow = row\n ctr+=1\n else:\n outrow = row[:colnum] + [stamp2iso(row[colnum])] + row[colnum+1:] \n cwrite.writerow(outrow)", "def convert_numtoletter(n):\r\n L = seats[0][n-1] #letter\r\n return L", "def c(k):\n if isinstance(k, str):\n return k.lower() if ord(k) % 2 == 0 else k.upper()\n return k", "def get_column(puzzle, col_num):\n\n puzzle_list = puzzle.strip().split('\\n')\n column = ''\n for row in puzzle_list:\n column += row[col_num]\n\n return column", "def get_reverse_column(column):\n for suffix, other in [('_1', '_2'), ('_2', '_1')]:\n if column.endswith(suffix):\n return column[:-len(suffix)] + other\n return column", "def _excel2num(x: str) -> int:\n index = 0\n\n for c in x.upper().strip():\n cp = ord(c)\n\n if cp < ord(\"A\") or cp > ord(\"Z\"):\n raise ValueError(f\"Invalid column name: {x}\")\n\n index = index * 26 + cp - ord(\"A\") + 1\n\n return index - 1", "def _nth_letter(n):\r\n\treturn string.ascii_lowercase[n % len(string.ascii_lowercase)]", "def _pos2col(self, start, cpos, **opts):\n tw = opts.get('tab_width', self.TAB_WIDTH)\n tt = opts.get('tab_type', 'stop')\n if tt == 'fixed':\n\n def advance(p):\n return p + tw\n else:\n\n def advance(p):\n return tw * ((p + tw) // tw)\n\n colnum = 0\n while cpos > 0:\n if self.input[start] == '\\t':\n colnum = advance(colnum)\n else:\n colnum += 1\n start += 1\n cpos -= 1\n return colnum", "def int_to_alpha(num):\n remainder = num\n text = []\n if num >= 26:\n major = remainder // 26\n text.append(ascii_lowercase[remainder // 26 - 1])\n remainder -= major * 26\n text.append(ascii_lowercase[remainder])\n return \"\".join(text)", "def get_color(col, color):\n if color is None and col is None:\n return 'C0'\n if col is None:\n return color\n if not isinstance(col, int):\n raise ValueError(\"`col` must be an integer. 
Consider using `color` instead.\")\n return 'C{}'.format(col)", "def chr_mod(value: int) -> str:\n return Base64._CHARSET[value % len(Base64._CHARSET)]", "def col_to_symbol(index, sign, size_H):\n symbol = chr(index) if (sign == 1) else chr(index + size_H)\n return symbol", "def asIndex(i):\n return u\"\".join([unichr(0x2050 + ord(c)) for c in str(i)])", "def asIndex(i):\n return u\"\".join([unichr(0x2050 + ord(c)) for c in str(i)])", "def shift(self, char, key):\n shifted = chr(ord(char) + key)\n if shifted > 'Z':\n return chr(ord(shifted) - 26)\n else:\n return shifted", "def _offset_to_line_column(cls, chars_per_byte: int, offset: int, adjust_column: int = 0) -> str:\r\n line = (offset // cls.BYTES_PER_ROW) + 1 # Line is 1-based\r\n column = ((offset % cls.BYTES_PER_ROW) * chars_per_byte)\r\n return f\"{line}.{column + adjust_column}\"", "def change(coor):\n return chr(coor[0] + 65), coor[1] + 1", "def rot_char(input, shift: int):\n input = ord(input)\n if input >= ord('a') and input <= ord('z'):\n return chr(ord('a') + (input - ord('a') + shift) % 26)\n elif input >= ord('A') and input <= ord('Z'):\n return chr(ord('A') + (input - ord('A') + shift) % 26)\n return chr(input)", "def humanize(self, well_ref):\n row, col = self.decompose(well_ref)\n return \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"[row] + str(col + 1)", "def letter_to_index(letter):\r\n return ord(letter.lower()) - CHAR_A", "def letter_to_index(letter):\r\n return ord(letter.lower()) - CHAR_A", "def convert_to_alphabet(c, avoid_tab_and_lf=False):\n if c == 1:\n return 32 if avoid_tab_and_lf else 9 # space instead of TAB\n if c == 127 - 30:\n return 92 if avoid_tab_and_lf else 10 # \\ instead of LF\n if 32 <= c + 30 <= 126:\n return c + 30\n else:\n return 0 # unknown", "def decryptionShift(text, index):\n s = text;\n transformedChar = \"\"\n transformedChar = ord(s[index]) - 1\n\n if (s[index] == 'A'):\n transformedChar = chr(ord(s[index]) - 1 + 26)\n else:\n transformedChar = chr(ord(s[index]) - 1)\n\n print(\"Single Shift Decrypted text: \" )\n return s[:index] + transformedChar + s[index+1:]", "def to_ordinal(self):\n return mod(self.number - 1 + 39 * (self.number - self.name), 260)", "def max_width_col(table, col_idx):\n return max(len(row[col_idx]) for row in table)", "def decode(self, x, calc_argmax=True):\n if calc_argmax:\n x = x.argmax(axis=-1)\n return \"\".join(self.indices_char[x] for x in x)", "def ordinal(n):\n ord_dict = {1: \"st\", 2: \"nd\", 3: \"rd\"}\n return str(n + 1) + ord_dict.get((n + 1) if (n + 1) < 20 else (n + 1) % 10, \"th\")", "def XToCol(self, x):\r\n \r\n colLeft = 0\r\n numColumns = self.GetColumnCount()\r\n for col in xrange(numColumns):\r\n \r\n if not self.IsColumnShown(col):\r\n continue \r\n\r\n column = self.GetColumn(col)\r\n\r\n if x < (colLeft + column.GetWidth()):\r\n return col\r\n \r\n colLeft += column.GetWidth()\r\n \r\n return wx.NOT_FOUND", "def getCol(self, n, offset=0):\n return self._c[(n*self.__height + offset):((n+1) * self.__height)]", "def col_to_indices(col):\r\n return [(row, col) for row in range(0, 9)]", "def dctColIndex(pdct, symIndex, uelIndices):\n return _dctmcc.dctColIndex(pdct, symIndex, uelIndices)", "def strIdx(idx):\n if not isinstance(idx, (int, np.integer)):\n raise ValueError(\"Index must be an integer.\")\n\n return str(idx) if idx >= 0 else str(-idx) + u'\\u0305'", "def _get_col(self, idx):\n return self.text[self._fwf.column_slices[idx]]", "def format_column(p_df, column, no_of_prod, idx_col):\n p_df[column] = p_df[column].astype(int) + 1\n p_df[column] = 
np.where(p_df[column] <= no_of_prod,\n p_df[column].astype(str),\n \"CL\" + p_df[idx_col][p_df[column]-no_of_prod-1].astype(str)\n )\n p_df[column] = p_df[column].map(lambda x: x.rsplit('.', 1)[0])\n return p_df", "def shift_char(char, shift, charset):\n index = (charset.index(char) + shift) % len(charset)\n return charset[index]", "def decode(self, x, calc_argmax=True):\n if calc_argmax:\n x = x.argmax(axis=-1)\n return \"\".join(self.indices_char[x] for x in x)", "def test_multicolumn_factorize_columns_suffix_change():\n df = pd.DataFrame(\n {\n \"a\": [\"hello\", \"hello\", \"sup\"],\n \"b\": [1, 2, 3],\n \"c\": [\"aloha\", \"nihao\", \"nihao\"],\n }\n ).factorize_columns(column_names=[\"a\", \"c\"], suffix=\"_col\")\n assert \"a_col\" in df.columns\n assert \"c_col\" in df.columns\n assert \"a_enc\" not in df.columns\n assert \"c_enc\" not in df.columns", "def comp_attack_column(self):\n column_hit = self.column_arry[-1]\n if column_hit == 10:\n column = random.randint(0, 9)\n return column\n else:\n attk_random = self.random_attk_int()\n if attk_random == 1:\n column = column_hit + 1\n return column\n elif attk_random == 2:\n column = column_hit - 1\n return column", "def __get_column(self, index: int) -> int:\n return index % self.columns", "def decode(self, x, calc_argmax=True):\n if calc_argmax:\n x = x.argmax(axis=-1)\n return ''.join(self.indices_char[x] for x in x)", "def str_to_c(cmd_x,lenth):\n i = 0\n cmd_r = [0 for x in range(lenth)]\n while (i<lenth):\n cmd_r[i]=ord(cmd_x[i])\n i+=1\n return cmd_r", "def str_to_c(cmd_x,lenth):\n i = 0\n cmd_r = [0 for x in range(lenth)]\n while (i<lenth):\n cmd_r[i]=ord(cmd_x[i])\n i+=1\n return cmd_r", "def encryptionShift(text, index):\n s=text\n transformedChar=\"\"\n transformedChar = ord(s[index]) + 1\n\n if(transformedChar > 90):\n transformedChar=chr(ord(s[index]) + 1 - 26)\n else:\n transformedChar = chr(transformedChar)\n\n print(\"Single Shift Encrypted text: \")\n return s[:index] + transformedChar + s[index+1:]", "def letter_code(letter):\n value = ord(letter.lower()) - ord('a') + 10\n return value + value // 11", "def mapChrForVersion(c):\n\tif c.startswith('chrM'):\n\t\treturn 998\n\telif c == 'chrX':\n\t\treturn 999\n\telif c == 'chrY':\n\t\treturn 1000\n\telse:\n\t\treturn int(c[3:])", "def convert_ascii_character(x: str):\n return ord(x) * 10 if ord(x) < LIMIT else 0", "def _get_b26_num(rem, N):\n if N>0:\n pexp = 26**N\n remainder = (rem % pexp) \n return chr(97 + (rem // pexp)) + PyJSplit._get_b26_num(remainder, N-1) \n else: \n return chr(97 + rem)", "def random_alpha_num_char():\n num = random.randint(0, 26 + 26 + 10)\n if num < 26:\n return chr(num + 65)\n num -= 26\n if num < 26:\n return chr(num + 97)\n return chr(num + 48)", "def ordinal(n):\n if 11 <= n <= 19:\n return str(n) + \"th\"\n s = str(n)\n last = int(s[-1])\n if 1 <= last <= 3:\n return s + (\"st\", \"nd\", \"rd\")[last-1]\n return s + \"th\"", "def _cluster_name(index):\n if index < 26: return chr(97+index)\n else: return 'a'+chr(71+index)" ]
[ "0.7620537", "0.74437094", "0.7439917", "0.7273949", "0.68757963", "0.6614841", "0.65894353", "0.6583816", "0.6494394", "0.64713115", "0.64713115", "0.6470613", "0.6470613", "0.6470613", "0.64606607", "0.6388831", "0.6317523", "0.6244326", "0.61555386", "0.60580075", "0.5990195", "0.59671515", "0.58490455", "0.583574", "0.5821506", "0.5821506", "0.57869524", "0.57469344", "0.570745", "0.56874335", "0.5674583", "0.56738794", "0.56592226", "0.56551534", "0.56436974", "0.5633291", "0.5611472", "0.55985194", "0.5596305", "0.5571812", "0.5569438", "0.5569438", "0.5548842", "0.5539138", "0.55340314", "0.5525533", "0.5520429", "0.5519749", "0.5471431", "0.5471431", "0.5469898", "0.54650074", "0.544916", "0.54379237", "0.5409103", "0.53790647", "0.5375325", "0.5346884", "0.53454393", "0.53435135", "0.53415436", "0.53331333", "0.53165704", "0.53165704", "0.52327436", "0.5218499", "0.52072793", "0.519277", "0.51857007", "0.5178484", "0.5178484", "0.5175897", "0.51635695", "0.51570356", "0.51430386", "0.51382583", "0.51184124", "0.5109647", "0.50970614", "0.50957936", "0.5088581", "0.50883603", "0.50872624", "0.50845927", "0.50743896", "0.5074313", "0.50693023", "0.5066432", "0.5059257", "0.50572413", "0.5041691", "0.5041691", "0.50338715", "0.5024321", "0.5023506", "0.5010382", "0.50097656", "0.49886903", "0.4981044", "0.49777374" ]
0.7847133
0
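A quick, self-contained sketch of the column-letter conversion documented in this record, restated as a standalone function so it runs without openpyxl, plus an assumed inverse helper used only for the sanity checks at the bottom.

def get_column_letter(col_idx):
    # Same borrow-on-zero base-26 scheme as the documented helper:
    # columns are 1-based, so a remainder of 0 maps to 'Z' (26) and borrows 1.
    if not 1 <= col_idx <= 18278:
        raise ValueError("Invalid column index {0}".format(col_idx))
    letters = []
    while col_idx > 0:
        col_idx, remainder = divmod(col_idx, 26)
        if remainder == 0:
            remainder = 26
            col_idx -= 1
        letters.append(chr(remainder + 64))
    return ''.join(reversed(letters))


def column_index_from_string(letters):
    # Inverse mapping, e.g. 'C' -> 3 and 'AA' -> 27; used here only to check round-trips.
    idx = 0
    for ch in letters.upper():
        idx = idx * 26 + (ord(ch) - 64)
    return idx


assert get_column_letter(3) == 'C'
assert get_column_letter(26) == 'Z'
assert get_column_letter(27) == 'AA'
assert get_column_letter(702) == 'ZZ'
assert column_index_from_string(get_column_letter(18278)) == 18278  # 'ZZZ'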
Given the head of a LinkedList, iteratively retain i nodes and then delete the next j nodes, repeating until the end of the list.
def skip_i_delete_j(head, i, j):
    if i == 0:
        return None
    if head is None or j < 0 or i < 0:
        return head
    current = head
    previous = None
    while current:
        # skip (i - 1) nodes
        for _ in range(i - 1):
            if current is None:
                return head
            current = current.next
        previous = current
        current = current.next
        # delete next j nodes
        for _ in range(j):
            if current is None:
                break
            next_node = current.next
            current = next_node
        previous.next = current
    return head
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self, index):\n if index == 0 and self.head is not None:\n self.head = self.head.next\n return\n\n current_index = 0\n current = self.head\n previous = None\n\n while current:\n if current_index == index:\n previous.next = current.next\n\n previous = current\n current = current.next\n current_index += 1", "def deleteAtIndex(self, index):\n cur = self.head\n if cur == None:\n return\n elif index == 0:\n self.head = cur.next\n\n cur, i = self.head, 1\n while cur and i != index:\n cur = cur.next\n i += 1\n if cur.next == None:\n cur = None\n else:\n cur.next = cur.next.next", "def remove_a_specific_item(self, index):\n\n current = self.head\n previous = None\n for i in range(index):\n previous = current\n current = current.next\n if previous is None: self.head = current.next\n else: previous.next = current.next\n self.size -= 1", "def deleteAtIndex(self, index: int) -> None:\n if(index == 0):\n self.head = self.head.next\n else:\n prev = None \n cur = self.head \n cnt = 0 \n \n while cur != None:\n if(cnt == index):\n next_node = cur.next\n prev.next = next_node \n return\n else:\n prev = cur \n cur = cur.next\n cnt += 1", "def delete_by_index(self, index):\n if index < 0 or index >= self.get_size():\n raise IndexError('Index out of bounds')\n if index == 0:\n self.head = self.head.next\n return\n i = 0\n temp = self.head\n while temp is not None:\n if i == index-1:\n temp.next = temp.next.next\n break\n temp = temp.next\n i += 1", "def deleteAtIndex(self, index):\n\n if index < 0:\n return -1\n\n p = self.head\n while index and p: # 0-index before index-th\n p = p.next\n index -= 1\n\n if p == None or p.next == None:\n return\n if p.next.next:\n p.next.next.prev = p\n p.next = p.next.next\n if p.next == None:\n self.tail = p\n # self.printList()", "def deleteAtIndex(self, index):\n if index >= 0 and index < self.length:\n prev = None\n curr = self.head\n _next = None\n if curr:\n _next = curr.next\n for i in range(1, index + 1):\n prev = curr\n curr = curr.next\n if curr:\n _next = curr.next\n if prev:\n prev.next = _next\n else:\n self.head = _next\n self.length -= 1", "def deleteAtIndex(self, index: int) -> None:\n if self.head == None:\n return -1\n curr = self.head\n if index == 0:\n self.head = curr.next\n return\n if index < 0:\n return -1\n for i in range(index - 1):\n curr = curr.next\n if curr is None:\n break\n if curr is None:\n return -1\n if curr.next is None:\n return -1\n \n next = curr.next.next\n curr.next = None\n curr.next = next", "def erase(self, index):\r\n if index >= self.length():\r\n print(\"ERROR\")\r\n return None\r\n current_index = 0\r\n current_node = self.head\r\n while True:\r\n last_node = current_node\r\n current_node = current_node.next\r\n if current_index == index:\r\n last_node.next = current_node.next\r\n return\r\n current_index += 1", "def deleteAtIndex(self, index: int) -> None:\n if index < 0 or index >= self.size:\n return\n\n curr = self.head\n for _ in range(index):\n curr = curr.next\n curr.next = curr.next.next\n self.size -= 1", "def delete_by_index(self, index):\n cur = self.head\n length=self.get_length()\n if type(index) is int:\n if self.is_empty():\n return\n else:\n if index > length:\n # The index value is out of range and prompts and exits\n print(\"Index is out of range.\")\n return\n else:\n if index == 0:\n if cur.next == None:\n self.head = None\n else:\n cur.next.prev = None\n self.head = cur.next\n return\n else:\n while (index) > 0:\n cur = cur.next\n index -= 1\n\n # Point the next node of cur to the next node of 
cur\n cur.prev.next = cur.next\n # Point the prev of the next node of cur to the previous node of cur\n cur.next.prev = cur.prev\n length -= 1\n return\n else:\n print(\"Index value is not int.\")\n return", "def deleteAtIndex(self, index):\n cur = self.head\n prev = None\n# self.display(\"deleteAtIndex, deleting value at index \"+str(index))\n if not index:\n head = head.nxt\n if self.tail == cur:\n self.tail = None\n del cur\n return\n \n i = 0\n while i < index and cur:\n prev = cur\n cur = cur.nxt\n i+=1\n if prev:\n if cur:\n prev.nxt = cur.nxt\n if self.tail == cur:\n self.tail = prev\n del cur", "def delete(self, index):\n # check validity of index:\n if index < 0 or index > self.n:\n print(\"Index Error; please input valid index\")\n return\n # if head element is to be removed,\n if index == 0:\n _ = self.pop_front()\n return\n # else,\n temp_node = self.head\n for _ in range(index-1):\n temp_node = temp_node.next # traverse the list\n index_node = temp_node.next\n # unlink\n temp_node.next = temp_node.next.next\n index_node = None\n self.n -= 1", "def deleteAtIndex(self, index):\n if index < 0 or index >= self.size:\n return\n\n curr = self.head\n if index == 0:\n self.head = curr.next\n else:\n for i in range(index - 1):\n curr = curr.next\n curr.next = curr.next.next\n\n self.size -= 1", "def deleteAtIndex(self, index: int) -> None:\n if index < 0 or index > self.cnt-1:\n return \n tmp = self.dummy\n for _ in range(index):\n tmp = tmp.next\n if index == self.cnt - 1:\n tmp.next = None\n else:\n tmp.next = tmp.next.next\n if tmp.next:\n tmp.next.pre = tmp\n self.cnt -= 1", "def delete_list(self): \n temp_node = self.head\n while temp_node is not None:\n prev_node = temp_node\n temp_node = temp_node.next\n # prev_node.val += \": deleted\" # for sanity check\n # reset data\n prev_node.val = None\n prev_node.next = None", "def deleteAtIndex(self, index):\n if index >= self.len:\n return\n p = self.head\n while index > 0:\n index -= 1\n p = p.next\n if p.next is self.tail:\n self.tail = p\n p.next = p.next.next\n self.len -= 1", "def delete(self, ele):\n prev = current = self.head\n element_in_head = False\n if self.head:\n while True:\n\tif current.data == ele:\n\t if current == self.head:\n\t element_in_head = True\n\t else:\n\t prev.next = current.next\n\t break\n\tprev = current\n\tcurrent = current.next\n\tif current == self.head:\n\t break\n if element_in_head:\n\tif self.head.next == self.head:\n\t self.head = None\n\telse:\n\t prev.next = self.head.next\n\t self.head = self.head.next", "def remove_nth_element(self, position):\n if not self.head or position > self.length() -1:\n raise LinkedListException\n if position == 0 and self.head == self.head.next:\n self.head = None\n else:\n current = self.head\n prev = self.head\n counter = 0\n while counter < position or position == 0:\n counter += 1\n prev = current\n current = current.next\n\tif current == self.head:\n\t break\n if position == 0:\n self.head = current.next\n prev.next = current.next", "def delete_from_tail(self):\n\n current = self.head\n #get the node right before the tail\n while current != None:\n if current.next == self.tail:\n current.next = None\n return\n current = current.next", "def delete_node_at_index(self, index):\n if index < 0 or index >= self.size:\n return\n\n curr = self.head\n if index == 0:\n self.head = curr.next\n else:\n for i in range(index - 1):\n curr = curr.next\n curr.next = curr.next.next\n\n self.size -= 1", "def delete(self, data):\r\n current_node = self.head\r\n current_index = 
0\r\n index = self.get_index(data)\r\n while current_node.next != None:\r\n last_node = current_node\r\n current_node = current_node.next\r\n if current_index == index:\r\n last_node.next = current_node.next\r\n return\r\n current_index += 1", "def remove_index(self, index):\n current = self.head\n position = index\n if index > (self.size() - 1):\n return None\n elif index == 0:\n self.head = current.next_node\n else: \n while position >= 1:\n previous = current\n current = current.next_node\n position -= 1 \n previous.next_node = current.next_node\n\n return current", "def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n dummy = ListNode(0)\n dummy.next = head\n first = dummy\n second = dummy\n\n for i in range(n + 1):\n first = first.next\n\n while first:\n first = first.next\n second = second.next\n\n second.next = second.next.next\n\n return dummy.next", "def deleteAtIndex(self, index: int) -> None:\n # if the index is invalid, do nothing\n if index < 0 or index >= self.size:\n return\n \n # find predecessor and successor of the node to be deleted\n if index < self.size - index:\n pred = self.head\n for _ in range(index):\n pred = pred.next\n succ = pred.next.next\n else:\n succ = self.tail\n for _ in range(self.size - index - 1):\n succ = succ.prev\n pred = succ.prev.prev\n \n # delete pred.next \n self.size -= 1\n pred.next = succ\n succ.prev = pred", "def deleteAtIndex(self, index: int) -> None:\n node = self.get_node(index)\n\n if node:\n #print(\"called inside node to delete is \" + str(node) )\n prev_node = node.prev\n next_node = node.next\n\n if prev_node:\n prev_node.next = next_node\n else:\n self.head = next_node\n if next_node:\n next_node.prev = prev_node\n\n\n\n\n self.node_count -= 1", "def delete(self, value):\n current = self.head\n index = 1\n ''' delete first element '''\n if index == 1 and current.value == value:\n print (\"deleting first element\")\n current.next = current.next.next\n return\n \n ''' delete last element '''\n while not current.next.next and current.next.value == value:\n print (\"deleting last element\")\n current.next = None\n return\n \n ''' anywhere in between '''\n while current.next.next and current.next.value != value:\n current = current.next\n \n ''' delete the element '''\n print (\"deleting anywhere between element\")\n current.next = current.next.next\n return", "def delI(current,i):\r\n j=1\r\n while(current.next):\r\n if j<i:\r\n j+=1\r\n current=current.next\r\n elif j==i:\r\n delNode=current.next\r\n nextNode=delNode.next\r\n current.next=nextNode\r\n print('del num is ',delNode)\r\n return nextNode\r\n return False", "def remove(self, d):\n\n if self.head is not None:\n if self.head.data == d:\n self.head = self.head.next\n else:\n temp = self.head\n while temp.next is not None:\n if temp.next.data == d:\n temp.next = temp.next.next\n break\n else:\n temp = temp.next", "def erase(self, index):\n if self.empty():\n return \"Linked List is empty\"\n size = self.size()\n if index > size - 1:\n return \"Size of the Linked List is less than the index\"\n\n idx = 0\n h = self.head\n previous = self.head\n while h.next is not None:\n if idx is index:\n if previous is h:\n data = h.data\n self.head = h.next\n return data\n else:\n data = h.data\n previous.next = h.next\n h = None\n return data\n idx += 1\n previous = h\n h = h.next\n\n # Pop the last element\n data = previous.data\n previous.next = None\n return data", "def deleteAtIndex(self, index):\n if index < 0 or index >= self._size:\n return\n elif index == 0:\n 
self.deleteHead()\n return\n elif index == self._size - 1:\n self.deleteTail()\n return\n\n current = self._head\n for _ in range(index - 1):\n current = current.next\n current.next = current.next.next\n self._size -= 1", "def remove(self , element):\n current = self.head \n previous = None\n\n while current and current.data != element:\n previous = current\n current = current.next\n\n if previous == None :\n self.head = current.next\n elif current :\n previous.next = current.next\n current.next = None", "def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n\n if not head or not head.next:\n return None\n\n first_pointer = head\n second_pointer = head\n for i in range(n):\n second_pointer = second_pointer.next\n if not second_pointer:\n return head.next\n\n while second_pointer.next:\n first_pointer = first_pointer.next\n second_pointer = second_pointer.next\n first_pointer.next = first_pointer.next.next\n\n return head", "def delete_node_at_start(self):\n if not self.head:\n print('List already empty.')\n return\n self.head = self.head.next", "def delete(self, value):\n current = self.head\n previous = None\n while current.value != value and current.next:\n previous = current\n current = current.next\n if current.value == value:\n if previous:\n previous.next = current.next\n else:\n self.head = current.next\n pass", "def delete(self, value):\n current = self.head\n if current.value == value:\n self.head = current.next\n else:\n while current:\n if current.value == value:\n break\n prev = current\n current = current.next\n if current == None:\n return\n prev.next = current.next\n current = None", "def remove(self,index=0):\n if index>self.size-1: raise IndexError(\"Index out of range.\")\n elif self.size==1: self.reference=None\n else:\n pointer = self.reference\n for i in range(index): pointer = pointer.next\n pointer.previous.next, pointer.next.previous = pointer.next, pointer.previous\n if index==0: self.reference=self.reference.next\n self.size-=1", "def delete_node_at_end(self):\n if not self.head:\n print('List already empty')\n return\n temp = self.head\n while temp.next:\n if not temp.next.next:\n break\n temp = temp.next\n temp.next = None", "def remove(self, pointer=None, index=None, data=None):\n def rPointer(pointer):\n start = self.head\n if start==pointer:\n self.head = self.head.getLink()\n return start\n while start:\n if start.getLink()==None:\n return None\n if start.getLink()==pointer:\n temp=start.getLink()\n start.setLink(temp.getLink())\n return temp\n start = start.getLink()\n\n def rIndex(index):\n start = self.head\n count = 0\n if index == count:\n self.head = start.getLink()\n return start\n while count < index-1:\n start = start.getLink()\n if not start:\n return None\n count+=1\n else:\n temp=start.getLink()\n start.setLink(temp.getLink())\n return temp\n\n def rData(data):\n start = self.head\n if start.getMember()==data:\n self.head = start.getLink()\n return start\n while start:\n if start.getLink().getMember()==data:\n temp=start.getLink()\n start.setLink(temp.getLink())\n return temp\n start = start.getLink()\n return None\n \n if pointer and type(pointer)==Member:\n return rPointer(pointer)\n if index and type(index)==int:\n return rIndex(index)\n if data and type(data)==dict:\n return rData(data)\n return None", "def remove_dup2(linkedlist):", "def delete_at_index(self, index: int) -> T:\n try:\n previous_node = self.__get_node_at_index(index-1)\n except ValueError as e:\n if self.is_empty(): \n raise ValueError(\"List is empty\")\n elif 
index == 0:\n item = self.head.items\n self.head = self.head.link\n else:\n raise e\n else:\n item = previous_node.link.items\n previous_node.link = previous_node.link.link\n self.length -= 1\n return item", "def delete(self, value):\n current = self.head\n prev = None\n\n while current:\n if current.value == value:\n if prev == None:\n self.head = current.next\n else:\n prev.next = current.next\n break\n prev = current\n current = current.next", "def erase(self, index):\n node = self._get_node_at(index) \n if node is None:\n raise IndexError('List index out of range.') \n if node == self.head: \n if node.next_node is None:\n self.tail = None \n else: \n node.next_node.prev_node = None \n self.head = node.next_node\n elif node == self.tail: \n node.prev_node.next_node = None \n self.tail = node.prev_node\n else: \n node.prev_node.next_node = node.next_node\n node.next_node.prev_node = node.prev_node\n return node.value", "def removeDuplicates(self,head):\n if head != None:\n currentNode = head\n if(currentNode.next): \n counterNode = currentNode.next\n while(currentNode):\n if(counterNode):\n if(currentNode.data == counterNode.data): \n currentNode.next = None #If there are duplicate data, we cut connection between them.\n else:\n currentNodenext = counterNode # If there is no duplite, we connect again two nodes.\n currentNode = currentNode.next\n counterNode = counterNode.next\n else:\n break\n return head", "def deleteHead(self):\n if not self._head:\n return\n\n if self._head is self._tail:\n self._head = None\n self._tail = None\n else:\n self._head = self._head.next\n self._size -= 1", "def remove(self, data):\n\n traverse = self.head\n temp = self.head\n if traverse.data == data:\n self.head = traverse.next\n return\n\n while traverse.next != None:\n\n temp = traverse.next\n if temp.data == data:\n traverse.next = temp.next\n return\n\n traverse = traverse.next", "def remove(self,p):\r\n \r\n if p == self.head: #if p is the head node\r\n self.head = p.next #set the next node of p to be the 'new' head node\r\n (p.next).prev = None #remove the node at p\r\n p.next = None\r\n \r\n elif p == self.tail: #if p is the tail node\r\n self.tail = p.prev #set the prev node of p to be the 'new' tail node\r\n (p.prev).next = None #remove the node at p\r\n p.prev = None\r\n \r\n else:\r\n (p.prev).next = p.next #linking out p\r\n (p.next).prev = p.prev\r\n p.prev = None #invalidating the position p\r\n p.next = None\r\n\r\n self.size -=1 #decrease length of linked list by 1\r", "def __remove_first(self):\n if self.__head is not None:\n self.__length -= 1\n self.__head = self.__head.next()\n if self.__length == 0: # when there are no more elements in the list,\n self.__last = None # remove the pointer to the last element", "def delete(self, index):\n if not 1 <= index <= self.count: #de index moet waardig zijn\n return False\n if self.isEmpty() is True: #als de lijst leeg is, kan je niet verwijderen\n return False\n if self.getLength() == 1: #speciaal geval, nog maar 1 item, verwijder door self.head.next er niet meer naar te laten wijzen\n self.head.next = None\n if index == 1: #als index == 1, dan moet dus het laatste item weg, dus moet men helemaal de ketting doorlopen om prev te vinden\n index = self.count + 1 #had evengoed range(0, index (zonder + 1)) kunnen doen, het doel is om naar het voorlaatste item te gaan\n current = self.head.next\n prev = self.head\n for teller in range(1, index): #zoek 'voorlaatste' (eigenlijk laatste, maar visueel eerder 'voorlaatste' item, want deze wijst naar het 
laatste\n current = current.next\n prev = prev.next\n self.head.next = prev\n prev.next = current.next #current moet eigenlijk weg, dus zetten we de pointer van de vorige op het item waar current naar wijst\n #op deze manier wordt current eigenlijk 'ovegeslagen'\n else:\n current = self.head.next\n prev = self.head\n for teller in range(1, index):\n current = current.next\n prev = prev.next\n prev.next = current.next\n\n\n self.count -= 1\n return True", "def remove_duplicates_slow(linked_list):\n current = linked_list.head\n while current:\n runner = current\n while runner:\n if runner.next_node and runner.next_node.value == current.value:\n # delete this duplicate\n runner.next_node = runner.next_node.next_node\n runner = runner.next_node\n current = current.next_node", "def delete_by_data(self, data):\n if self.is_empty():\n return\n else:\n cur = self.head\n if cur.data == data:\n # If the element of the first node is the element to be deleted\n if cur.next == None:\n self.head = None\n else:\n cur.next.prev = None\n self.head = cur.next\n return\n while cur != None:\n if cur.data == data:\n # Point the next node of cur to the next node of cur\n cur.prev.next = cur.next\n # Point the prev of the next node of cur to the previous node of cur\n cur.next.prev = cur.prev\n break\n cur = cur.next", "def delete_node(head, nodetodelete):\n\n #Nothing to do if the head is None or the nodetodelete is None\n if not head and not nodetodelete:\n print 'Nothing to delete. No arguments passed in'\n return\n\n #is the nodetodelete the head node, deal with that.\n node=head\n\n while(node.next):\n if(node.next == nodetodelete):\n if(node.next.next == None):\n node.next = None\n else:\n node.next = node.next.next\n\n #After finding the relevant nodetodelete, break out of the loop\n break\n node = node.next", "def remove(self, item):\n \"\"\"\n :type item: Node()\n :rtype None\n \"\"\"\n if self.head.getData() == item:\n self.head = self.head.getNext()\n return\n\n prev = curr = self.head\n while curr: \n if curr.getData() == item:\n prev.setNext(curr.getNext())\n break\n prev = curr\n curr = curr.getNext()", "def remove(self, key):\n if self.head is None:\n print('Cannot remove from empty list!')\n return\n if self.head.data == key:\n self.head = self.head.next\n return\n\n itr = self.head\n prev = ListNode()\n while itr:\n curr = itr\n if itr.data == key:\n prev.next = curr.next\n return\n prev = curr\n itr = itr.next", "def _remove(self, curr, prev):\n if prev:\n # If there is a previous node then update it's next attribute\n # to refer to the next node of the node that is being removed.\n prev.next = curr.next\n else:\n # If there is no previous node then we are at the head of the list.\n # Update the first_node reference to the next node of the node \n # that is being removed.\n self.first_node = curr.next\n # Delete the node that has been delinked.\n del curr", "def delete(self, key):\n # Your code here\n index = self.hash_index(key) \n print(index)\n cur = self.data[index].head\n\n if cur.key==key:\n \n self.data[index].head = self.data[index].head.next\n # cur.next = self.data[index].head\n self.count -=1\n print(\"Warning:headnode deleted\") \n else:\n \n while cur.next: \n prev = cur\n cur =cur.next\n if cur.key == key:\n #to remove the current node, change the pointers\n prev.next=cur.next \n self.count -=1 \n \n\n # return None", "def remove(self, val):\n current_node = self.head\n previous_node = None\n\n while current_node:\n if current_node.val == val:\n if previous_node:\n 
previous_node.next = current_node.next\n else:\n self.head = current_node.next\n\n previous_node = current_node\n current_node = current_node.next", "def remove(self, key: int) -> None:\n index = key % self.size\n if self.table[index].value is None:\n return \n \n p = self.table[index]\n \n if p.key == key:\n if p.next is None:\n self.table[index] = ListNode()\n else:\n self.table[index] = p.next\n return\n \n prev = p\n while p:\n if p.key == key:\n prev.next = p.next\n return\n prev = p\n p = p.next\n #p = p.next\n #prev = p\n #prev, p = p, p.next", "def delete(self, key: int) -> None:\n i = k % self.capacity\n cur = pre = self.data[i]\n if not cur:\n return\n if cur.pair[0] == k:\n self.data[i] = cur.next\n else:\n cur = cur.next\n while cur:\n if cur.pair[0] == k:\n pre.next = cur.next\n break\n else:\n cur, pre = cur.next, pre.next", "def delete_node_position(self, position):\n if not self.head:\n print('List is empty. No item to delete')\n return\n if position == 1:\n self.head = self.head.next\n return\n temp = self.head\n count = 1\n while temp and count < position - 1:\n count += 1\n temp = temp.next\n if not temp:\n print('Node doesn\\'t exist')\n return\n temp.next = temp.next.next", "def delete(self, data):\n\n current = self.head\n previous = None\n found = False\n while current and found is False:\n if current.data == data:\n found = True\n else:\n previous = current\n current = current.next\n if current is None:\n raise ValueError(\"Data not in list\")\n if previous is None:\n self.head = current.next\n else:\n previous.next = current.next\n self.size -= 1", "def remove(self, data):\n\n traverse = self.head\n temp = self.head\n if self.head == None:\n return None\n\n if traverse.data == data:\n self.head = traverse.next\n return\n\n while traverse.next != None:\n\n temp = traverse.next\n if temp.data == data:\n traverse.next = temp.next\n return\n\n traverse = traverse.next", "def test_delete_node(self):\r\n myObj = DLinkedList()\r\n myObj.append(120)\r\n myObj.append(100)\r\n myObj.detete_node(myObj.head.next)\r\n myObj.detete_node(myObj.head)\r\n self.assertEqual(myObj.get_head(), None)\r\n self.assertEqual(myObj.get_tail(), None)", "def delete(self,pos):\n pos.next = pos.next.next", "def remove_all(self, d):\n\n # Removes leading <d>'s by moving self.head\n while self.head is not None and self.head.data == d:\n self.head = self.head.next\n\n # Removes following <d>'s by traversing the LinkedList\n if self.head is not None:\n temp = self.head\n while temp.next is not None:\n if temp.next.data == d:\n temp.next = temp.next.next\n else:\n temp = temp.next", "def remove_player(lst,player):\n print(\"Removing\",player)\n cursor=lst.head\n while cursor.data!=player:\n cursor=cursor.next\n if cursor==lst.head:\n cursor.next.prev=lst.tail\n cursor.prev.next=cursor.next\n lst.head=cursor.next\n if cursor==lst.tail:\n cursor.next.prev=cursor.prev\n cursor.prev.next=lst.head\n lst.tail=cursor.prev\n cursor.prev.next=cursor.next\n cursor.next.prev=cursor.prev\n lst.size-=1", "def _delete(self):\n self.prev.next = self.next\n self.next.prev = self.prev", "def remove(self, item):\n \n previous = None\n current = self.head\n \n while current is not None:\n \n if current.get_data() == item:\n # If the item to be removed is the first item\n if previous is None:\n self.head = current.get_next()\n else:\n previous.set_next(current.get_next())\n return\n \n else:\n previous = current\n current = current.get_next()", "def remove(self, key):\n if self.head.data == key: # checking first corner 
case of first node to be removed\n self.head = self.head.next\n return\n\n elif self.head is None: # checking second corner case of linked list being empty\n return\n\n else: # otherwise maintain two pointers and remove the required node\n curr_node = self.head.next\n prev_node = self.head\n while prev_node.next is not None:\n if curr_node.data == key:\n prev_node.next = curr_node.next\n return\n\n return", "def delete_node(self, key):\n if not self.head:\n print('List is empty. No item to delete')\n return\n if self.head.data == key:\n self.head = self.head.next\n return\n temp = self.head\n while temp.next:\n if temp.next.data == key:\n break\n temp = temp.next\n temp.next = temp.next.next", "def delete(self, del_pos=None):\n if del_pos is None:\n del_pos = self.__length\n if self.__list is None:\n print \"Nothing to remove.\"\n else:\n if del_pos == 0:\n self.__list = self.__list.get_next()\n else:\n prior = self.__list\n current = self.__list.get_next()\n current_pos = 1\n while current.get_next() is not None and current_pos < del_pos:\n prior = current\n current = current.get_next()\n current_pos += 1\n prior.set_next(current.get_next())\n self.__length -= 1", "def remove_second(list):\n if list is None: return\n first = list\n second = list.next\n # Make the first node refer to the third\n first.next = second.next\n # Separate the second node from the rest of the list\n second.next = None\n return second", "def test_delete_node_sll_is_head(self):\n sll = SinglyLinkedList()\n a = Node('a')\n b = Node('b')\n sll.insert_beg(a)\n sll.insert_beg(b)\n sll.delete(b,start_node=b)\n assert(sll.head)", "def test_delete_sll_next_node(self):\n sll = SinglyLinkedList()\n a = Node('a')\n b = Node('b')\n c = Node('c')\n sll.insert_beg(a)\n sll.insert_beg(b)\n sll.insert_beg(c)\n sll.delete(a,start_node=b)\n actual = [i.data for i in sll]\n expected = 'a'\n nt.assert_not_in(expected,actual)", "def removeNode(self, node__to__remove): # Class O(nlog2n)\r\n # This is clear the worst function. It goes to different if statements before\r\n # start the 'real' computation to replace the value\r\n if node__to__remove > self.length():\r\n raise ValueError(\"Invalid position. 
The LinkedList has length %s\" % self.length())\r\n elif node__to__remove == 1:\r\n if self.length() == 1:\r\n raise ValueError(\"The LinkedList has only one node (the head)\")\r\n if self.length() == 2:\r\n self.head = Node(self.head.next)\r\n else:\r\n self.head = Node(self.head.next, self.head.next.next)\r\n elif (self.length() - 1) == node__to__remove:\r\n h = self.head\r\n count = 1\r\n while count != (node__to__remove - 1):\r\n h = h.next\r\n count += 1\r\n h.next = Node(h.next.next)\r\n elif self.length() == node__to__remove:\r\n h = self.head\r\n count = 2\r\n while count != (node__to__remove - 1):\r\n h = h.next\r\n count += 1\r\n h.next = Node(h.next)\r\n else:\r\n h = self.head\r\n count = 2\r\n while count != node__to__remove:\r\n h = h.next\r\n count += 1\r\n h.next = Node(h.next.next, h.next.next.next)", "def delete(self,del_node,start_node=None):\n if not self.head:\n raise IsEmpty(\n \"There are no nodes.\"\n )\n\n elif self.head == del_node:\n self.head = self.head.next\n return\n\n elif start_node == del_node or not start_node:\n start_node = self.head\n\n start = start_node\n next_node = start_node.next\n\n while True:\n if next_node == del_node:\n start.next = start.next.next\n return\n elif not next_node:\n raise NotFound(\"Can't find node\")\n start = next_node\n next_node = next_node.next", "def delete_node(self, node):\n curr = self.head\n while curr.next is not None:\n if curr.next == node:\n break\n curr = curr.next\n curr.next = node.next\n node = None\n return", "def removeDuplicates(self): \r\n aux = self.head \r\n if aux is None: \r\n return\r\n while aux.next is not None: \r\n #Compare head node with next node\r\n if aux.data == aux.next.data: \r\n new = aux.next.next\r\n aux.next = new \r\n else: \r\n aux = aux.next\r\n return self.head", "def remove_first(self):\n # return None if there are no Nodes\n if self.head is None:\n return None\n # save and disconect the first Node from the list\n # and set the head to the next Node\n removed = self.head\n self.head = self.head.next\n removed.next = None\n # set the tail as None if list got empty\n if self.head is None:\n self.tail = None\n # remove the skip back pointer from the second Node if needed\n elif self.head.next is not None:\n self.head.next.skip_back = None\n \n return removed.data", "def remove(self, element):\n if self.head.element == element:\n self.head = self.head.next\n self.head.prev = None\n return None\n cursor = self.head\n while cursor.next is not None:\n if cursor.next.element == element:\n cursor.next = cursor.next.next\n if cursor.next is not None:\n cursor.next.prev = cursor\n break\n else:\n cursor = cursor.next", "def delete_by_value(self, key):\n cur_node = self.head\n\n if cur_node and cur_node.data == key:\n self.head = cur_node.next\n cur_node = None\n prev = None\n while cur_node and cur_node.data != key:\n prev = cur_node\n cur_node = cur_node.next\n if cur_node is None:\n return\n prev.next = cur_node.next\n cur_node = None", "def delete_element(some_list, index):\n del some_list[index]\n return some_list", "def remove(self, value):\r\n if self.head is None:\r\n return\r\n\r\n if self.head.value == value:\r\n self.head = self.head.next\r\n return\r\n\r\n node = self.head\r\n while node.next:\r\n if node.next.value == value:\r\n node.next = node.next.next\r\n return\r\n node = node.next", "def deleteTail(self):\n if not self._tail:\n return\n\n if self._head is self._tail:\n self._head = None\n self._tail = None\n else:\n current = self._head\n while current.next != self._tail:\n 
current = current.next\n current.next = None\n self._tail = current\n self._size -= 1", "def deleteNode(self, node: ListNode, n: int) -> None:\n while node.val != n:\n node = node.next\n\n if node.val == n:\n node.val = node.next.val\n node.next = node.next.next", "def delete_ll_node(node):\n node.val = node.next.val\n node.next = node.next.next", "def __delitem__(self, index):\n # If input is a slice then delete all elements as determined\n # by the slice attributes, using an offset to account for the\n # changing size of the list.\n if isinstance(index, slice):\n offset = 0\n for i in xrange(*index.indices(len(self))):\n if i > -(len(self) + 1) or i < len(self):\n del self[i - offset]\n offset += 1\n return\n\n self.__verify_index(index)\n\n if index < 0:\n index += self.length\n\n index, prev_node, cur_node = self.__find_node_index(index)\n del cur_node.data_list[index]\n self.length -= 1\n\n self.__balance_node(prev_node, cur_node)", "def reorderList(self, head: ListNode) -> None:\n length = 0\n curr = head\n node_dict = {}\n while curr is not None:\n node_dict[length] = curr\n curr = curr.next\n length += 1\n\n j = length - 1\n for i in range(length):\n if node_dict[i].next is None or node_dict[i].next == node_dict[j]:\n break\n i_next = node_dict[i].next\n j_next = node_dict[j].next\n node_dict[i].next = node_dict[j]\n node_dict[j].next = i_next\n node_dict[j - 1].next = j_next\n j -= 1", "def remove_from_head(self):\n\n if self.size == 0: # no elements in list\n return None # nothing to return\n\n removed_value = self.head.value # make a copy of the node to be deleted\n\n if self.size == 1: # if only one element in list (node is head and tail)\n self.head = self.tail = None # list will be empty\n\n else: # more than one element in list\n self.head = self.head.next # shift head right (reassign head to head.next)\n self.head.prev = None # reassign head.prev to point at None (it used to point at old_head)\n\n self.size -= 1\n return removed_value", "def remove(self):\r\n if self.first() is not None:\r\n self.dec_size()\r\n self.set_first(self.first().next())\r\n if self.size() == 0: # when there are no more elements in the list,\r\n self.__last = None # remove the pointer to the last element\r", "def delete(self):\n if self.prev:\n self.prev.next = self.next\n if self.next:\n self.next.prev = self.prev", "def delete(self, k: Any) -> Any:\n i = abs(hash(k)) % self.size\n current = self.data[i]\n last = None\n while current is not None:\n if current.key == k:\n if last is not None:\n last.next = current.next\n else:\n self.data[i] = None\n last = current\n current = current.next", "def remove_value(self, value):\n if self.empty():\n return \"Linked List is empty\"\n h = self.head\n previous = self.head\n idx = 0\n while h is not None:\n if h.data is value:\n if previous is h:\n self.head = h.next\n return idx\n else:\n previous.next = h.next\n h = None\n return idx\n idx += 1\n previous = h\n h = h.next\n\n pass", "def test_iter_empty_sll(self):\n sll = SinglyLinkedList()\n a = Node('a')\n sll.insert_beg(a)\n sll.delete(a,a)\n print [i for i in sll]", "def delete(self):\n if self.head is None:\n return None\n item = self.head.data\n self.head = self.head.next\n return item", "def delete_node_at_pos(self, pos):\n if self.head:\n cur_node = self.head\n if pos == 0:\n self.head = cur_node.next\n cur_node = None\n return \n\n prev = None\n count = 0 \n while cur_node and count != pos:\n prev = cur_node\n cur_node = cur_node.next\n count += 1\n\n if cur_node is None:\n return \n\n prev.next = 
cur_node.next\n cur_node = None", "def remove(self, item):\n \n previous = None\n current = self.head\n \n while current is not None:\n \n if current.get_data() == item:\n # If the item to be removed is the first item\n if previous is None:\n self.head = current.get_next()\n else:\n previous.set_next(current.get_next())\n return\n \n # Early stop\n elif current.get_data() > item:\n return\n \n else:\n previous = current\n current = current.get_next()", "def remove_with_sort(to_remove):\n slow = to_remove.head\n runner = to_remove.head\n\n while slow:\n while runner:\n if runner.next_node:\n if slow.value == runner.next_node.value:\n runner.next_node = runner.next_node.next_node\n runner = runner.next_node\n slow = slow.next_node\n try:\n runner = slow.next_node\n except:\n pass", "def removeDuplicatesFromLinkedList(linkedlist):\n if not linkedlist:\n return None\n\n head = node = linkedlist\n while node:\n while node.next and node.value == node.next.value:\n node.next = node.next.next\n\n node = node.next\n\n return head", "def remove_by_value(self, data):\n pre_node = None\n for n in self:\n if n.data == data:\n if pre_node is None:\n self.pop()\n else:\n pre_node.next = n.next\n break\n pre_node = n\n else:\n raise ValueError(f'value [{data}] not found in linked list')" ]
[ "0.7670848", "0.7561153", "0.73862875", "0.7377517", "0.73646194", "0.73238605", "0.7250336", "0.72010875", "0.71746737", "0.71504664", "0.713762", "0.71322805", "0.7128258", "0.7071973", "0.70681566", "0.7032534", "0.69234806", "0.6903745", "0.6830704", "0.6819169", "0.6818123", "0.6764261", "0.67539406", "0.67496353", "0.6737122", "0.670159", "0.6696203", "0.6687579", "0.6655226", "0.6654771", "0.66542953", "0.6637123", "0.6576577", "0.6568986", "0.6510411", "0.648773", "0.6483608", "0.6442261", "0.6440237", "0.63927984", "0.6336201", "0.6331447", "0.63268536", "0.6304559", "0.62922424", "0.6286699", "0.62817496", "0.6266815", "0.62661153", "0.6252132", "0.6223539", "0.6215759", "0.6206565", "0.6175302", "0.6155453", "0.61247355", "0.6117967", "0.6109111", "0.60927", "0.6083678", "0.6059335", "0.6044642", "0.600802", "0.59907645", "0.5982603", "0.59707296", "0.59694105", "0.5931426", "0.5908916", "0.59071034", "0.5901596", "0.589327", "0.5876313", "0.5874675", "0.5868423", "0.58669966", "0.5862185", "0.58597153", "0.5835345", "0.58325773", "0.5832357", "0.58322513", "0.581984", "0.5817634", "0.5815224", "0.58145547", "0.5808568", "0.5792798", "0.5788487", "0.577906", "0.57765955", "0.57688475", "0.5766507", "0.576208", "0.5750653", "0.5736575", "0.57257164", "0.57245326", "0.5718856", "0.57129544" ]
0.81535465
0
Perform a context visibility test: create a (fake) image with the specified owner and is_public attributes, then create a context with the given keyword arguments, and expect exp_res as the result of an is_image_visible() call on that context.
def do_visible(self, exp_res, img_owner, img_public, **kwargs): img = FakeImage(img_owner, img_public) ctx = context.RequestContext(**kwargs) self.assertEqual(ctx.is_image_visible(img), exp_res)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_sharable(self, exp_res, img_owner, membership=None, **kwargs):\n\n img = FakeImage(img_owner, True)\n ctx = context.RequestContext(**kwargs)\n\n sharable_args = {}\n if membership is not None:\n sharable_args['membership'] = membership\n\n self.assertEqual(ctx.is_image_sharable(img, **sharable_args), exp_res)", "def create_image_from_visibility(vis, **kwargs) -> Image:\n assert isinstance(vis, Visibility) or isinstance(vis, BlockVisibility), \\\n \"vis is not a Visibility or a BlockVisibility: %r\" % (vis)\n \n log.debug(\"create_image_from_visibility: Parsing parameters to get definition of WCS\")\n \n imagecentre = get_parameter(kwargs, \"imagecentre\", vis.phasecentre)\n phasecentre = get_parameter(kwargs, \"phasecentre\", vis.phasecentre)\n \n # Spectral processing options\n ufrequency = numpy.unique(vis.frequency)\n vnchan = len(ufrequency)\n \n frequency = get_parameter(kwargs, \"frequency\", vis.frequency)\n inchan = get_parameter(kwargs, \"nchan\", vnchan)\n reffrequency = frequency[0] * units.Hz\n channel_bandwidth = get_parameter(kwargs, \"channel_bandwidth\", 0.99999999999 * vis.channel_bandwidth[0]) * units.Hz\n \n if (inchan == vnchan) and vnchan > 1:\n log.debug(\n \"create_image_from_visibility: Defining %d channel Image at %s, starting frequency %s, and bandwidth %s\"\n % (inchan, imagecentre, reffrequency, channel_bandwidth))\n elif (inchan == 1) and vnchan > 1:\n assert numpy.abs(channel_bandwidth.value) > 0.0, \"Channel width must be non-zero for mfs mode\"\n log.debug(\"create_image_from_visibility: Defining single channel MFS Image at %s, starting frequency %s, \"\n \"and bandwidth %s\"\n % (imagecentre, reffrequency, channel_bandwidth))\n elif inchan > 1 and vnchan > 1:\n assert numpy.abs(channel_bandwidth.value) > 0.0, \"Channel width must be non-zero for mfs mode\"\n log.debug(\"create_image_from_visibility: Defining multi-channel MFS Image at %s, starting frequency %s, \"\n \"and bandwidth %s\"\n % (imagecentre, reffrequency, channel_bandwidth))\n elif (inchan == 1) and (vnchan == 1):\n assert numpy.abs(channel_bandwidth.value) > 0.0, \"Channel width must be non-zero for mfs mode\"\n log.debug(\"create_image_from_visibility: Defining single channel Image at %s, starting frequency %s, \"\n \"and bandwidth %s\"\n % (imagecentre, reffrequency, channel_bandwidth))\n else:\n raise ValueError(\"create_image_from_visibility: unknown spectral mode \")\n \n # Image sampling options\n npixel = get_parameter(kwargs, \"npixel\", 512)\n uvmax = numpy.max((numpy.abs(vis.data['uvw'][:, 0:1])))\n if isinstance(vis, BlockVisibility):\n uvmax *= numpy.max(frequency) / constants.c.to('m s^-1').value\n log.debug(\"create_image_from_visibility: uvmax = %f wavelengths\" % uvmax)\n criticalcellsize = 1.0 / (uvmax * 2.0)\n log.debug(\"create_image_from_visibility: Critical cellsize = %f radians, %f degrees\" % (\n criticalcellsize, criticalcellsize * 180.0 / numpy.pi))\n cellsize = get_parameter(kwargs, \"cellsize\", 0.5 * criticalcellsize)\n log.debug(\"create_image_from_visibility: Cellsize = %g radians, %g degrees\" % (cellsize,\n cellsize * 180.0 / numpy.pi))\n override_cellsize = get_parameter(kwargs, \"override_cellsize\", True)\n if override_cellsize and cellsize > criticalcellsize:\n log.debug(\"create_image_from_visibility: Resetting cellsize %g radians to criticalcellsize %g radians\" % (\n cellsize, criticalcellsize))\n cellsize = criticalcellsize\n pol_frame = get_parameter(kwargs, \"polarisation_frame\", PolarisationFrame(\"stokesI\"))\n inpol = pol_frame.npol\n \n 
# Now we can define the WCS, which is a convenient place to hold the info above\n # Beware of python indexing order! wcs and the array have opposite ordering\n shape = [inchan, inpol, npixel, npixel]\n log.debug(\"create_image_from_visibility: image shape is %s\" % str(shape))\n w = wcs.WCS(naxis=4)\n # The negation in the longitude is needed by definition of RA, DEC\n w.wcs.cdelt = [-cellsize * 180.0 / numpy.pi, cellsize * 180.0 / numpy.pi, 1.0, channel_bandwidth.to(units.Hz).value]\n # The numpy definition of the phase centre of an FFT is n // 2 (0 - rel) so that's what we use for\n # the reference pixel. We have to use 0 rel everywhere.\n w.wcs.crpix = [npixel // 2 + 1, npixel // 2 + 1, 1.0, 1.0]\n w.wcs.ctype = [\"RA---SIN\", \"DEC--SIN\", 'STOKES', 'FREQ']\n w.wcs.crval = [phasecentre.ra.deg, phasecentre.dec.deg, 1.0, reffrequency.to(units.Hz).value]\n w.naxis = 4\n \n # TODO: Why is this check being done?\n # direction_centre = pixel_to_skycoord(npixel // 2 + 1, npixel // 2 + 1, wcs=w, origin=1)\n # assert direction_centre.separation(imagecentre).value < 1e-7, \\\n # \"Image phase centre [npixel//2, npixel//2] should be %s, actually is %s\" % \\\n # (str(imagecentre), str(direction_centre))\n \n w.wcs.radesys = get_parameter(kwargs, 'frame', 'ICRS')\n w.wcs.equinox = get_parameter(kwargs, 'equinox', 2000.0)\n \n return create_image_from_array(numpy.zeros(shape), wcs=w, polarisation_frame=pol_frame)", "def test_auth_public_owned(self):\n self.do_visible(True, 'pattieblack', True, tenant='pattieblack')", "def test_empty_private_owned(self):\n self.do_visible(True, 'pattieblack', False, is_admin=True)", "def test_anon_public_owned(self):\n self.do_visible(True, 'pattieblack', True)", "def test_auth_private_owned(self):\n self.do_visible(True, 'pattieblack', False, tenant='pattieblack')", "def test_anon_private_owned(self):\n self.do_visible(False, 'pattieblack', False)", "def test_empty_public_owned(self):\n self.do_visible(True, 'pattieblack', True, is_admin=True)", "def test_anon_private(self):\n self.do_visible(True, None, False)", "def test_anon_public(self):\n self.do_visible(True, None, True)", "def test_empty_private(self):\n self.do_visible(True, None, False, is_admin=True)", "def test_visibility(self, data, visible):\n layer = Points(data)\n assert layer.visible is True\n\n layer = Points(data, visible=visible)\n assert layer.visible is visible\n\n layer.visible = not visible\n assert layer.visible is not visible", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_auth_private(self):\n self.do_visible(True, None, False, tenant='froggy')", "def test_empty_public(self):\n self.do_visible(True, None, True, is_admin=True)", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_auth_public(self):\n self.do_visible(True, None, True, tenant='froggy')", "def glance_update_and_set_public(glance, image, image_info):\n image_properties = image_info['image_properties']\n try:\n logger.debug(\"glance image update: properties=%s\", image_properties)\n glance.images.update(image.id, **image_properties)\n logger.debug(\"glance image update: visibility=public\")\n glance.images.update(image.id, visibility='public')\n except Exception:\n logger.exception(\"Updating (-> public) Glance image '%s' [%s] failed\", image.name, image.id)\n return 1\n\n return 0", "def builder_should_create_target_image(self, builder, target, image_id, template, parameters):", "def 
test_aws_service_api_private_image_get(self):\n pass", "def expose(self, cmd):\n\n expType = cmd.cmd.keywords[0].name\n if expType in ('bias', 'test'):\n expTime = 0.0\n else:\n expTime = cmd.cmd.keywords[\"expTime\"].values[0]\n\n filename, image = self._doExpose(cmd, expTime, expType)\n cmd.finish('exposureState=done')", "def test_aws_service_api_private_images_get(self):\n pass", "def test_public_user(self):\n set_permission(Permission.SHARE, self.user1, self.collection)\n\n data = {\"public\": \"view\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"none\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"edit\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"share\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"owner\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def targetWeldCtx(*args, exists: bool=True, image1: Union[AnyStr, bool]=\"\", image2:\n Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\", mergeToCenter:\n bool=True, q=True, query=True, e=True, edit=True, **kwargs)->Union[None,\n Any]:\n pass", "def create_image_allowed(self, create_image_allowed):\n self._create_image_allowed = create_image_allowed", "def test_filter_public_permissions(self):\n data = {\"public\": \"view\"}\n check_public_permissions(data)\n\n data = {\"public\": \"edit\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"share\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"owner\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)", "def embed_condition_images(condition_image,\n scope,\n reuse=tf.AUTO_REUSE,\n fc_layers = None,\n use_spatial_softmax = True):\n if len(condition_image.shape) != 4:\n raise ValueError('Image has unexpected shape {}.'.format(\n condition_image.shape))\n with tf.variable_scope(scope, reuse=reuse, use_resource=True):\n image_embedding, _ = vision_layers.BuildImagesToFeaturesModel(\n condition_image, use_spatial_softmax=use_spatial_softmax)\n if fc_layers is not None:\n if len(image_embedding.shape) == 2:\n image_embedding = layers.stack(\n image_embedding,\n layers.fully_connected,\n fc_layers[:-1],\n activation_fn=tf.nn.relu,\n normalizer_fn=layers.layer_norm)\n image_embedding = layers.fully_connected(\n image_embedding, fc_layers[-1], activation_fn=None)\n else:\n image_embedding = layers.stack(\n image_embedding,\n layers.conv2d,\n fc_layers[:-1],\n kernel_size=[1, 1],\n activation_fn=tf.nn.relu,\n normalizer_fn=layers.layer_norm)\n image_embedding = layers.conv2d(\n image_embedding, fc_layers[-1], activation_fn=None)\n return image_embedding", "def _doExpose(self, cmd, expTime, expType):\n \n image = self.actor.camera.expose(cmd, expTime, expType)\n filename = self.getNextFilename(cmd)\n pyfits.writeto(filename, image, checksum=False, clobber=True)\n cmd.inform(\"filename=%s\" % (qstr(filename)))\n \n return filename, image", "def projectionContext(*args, exists: 
bool=True, history: bool=True, image1: Union[AnyStr,\n bool]=\"\", image2: Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\",\n name: AnyStr=\"\", q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def test_should_render_for_owner_unpublished(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context(\n public=False)))", "def maketestimage(self, *args, **kwargs):\n return _image.image_maketestimage(self, *args, **kwargs)", "def _create_request_context(\n self,\n url_name: str = 'review-request-detail',\n public: bool = True,\n status: str = ReviewRequest.PENDING_REVIEW,\n user: Optional[User] = None,\n can_edit_reviewrequest: bool = True,\n *args,\n **kwargs,\n ) -> Context:\n review_request = self.create_review_request(public=public,\n status=status)\n\n return Context({\n 'review_request': review_request,\n 'request': self.create_http_request(\n user=user or review_request.submitter,\n url_name=url_name),\n 'perms': {\n 'reviews': {\n 'can_edit_reviewrequest': can_edit_reviewrequest,\n },\n },\n })", "def expose(self):\n\n ## Determine type of exposure (exp, series, stack)\n exptype = str(self.exptypeComboBox.currentText())\n mode = self.modedict[exptype]\n\n ## Get exposure parameters\n if mode == \"bias\":\n exptime = 0.0\n else:\n exptime = self.exptimeSpinBox.value()\n imcount = self.imstackSpinBox.value()\n seqnum = self.imnumSpinBox.value()\n mintime = self.minexpSpinBox.value()\n maxtime = self.maxexpSpinBox.value()\n step = self.tstepSpinBox.value()\n\n ## Determine filter kwargs\n if self.filterToggleButton.isChecked():\n kwargs = {'filter_name' : str(self.filterComboBox.currentText())}\n else:\n kwargs = {'monowl' : self.monoSpinBox.value()}\n\n if self.testimCheckBox.isChecked():\n title = 'test'\n else:\n title = str(self.imtitleLineEdit.text())\n\n ## Build filepath\n filepath = os.path.join(str(self.imfilenameLineEdit.text()),title)\n \n ## Check if single exposure\n if exptype in [\"Exposure\", \"Dark\", \"Bias\"]:\n\n ## Perform exposure\n self.logger.info(\"Starting {0}s {1} image.\".format(exptime, exptype))\n self.image_start.emit(1)\n\n try:\n filename = exposure.im_acq(mode, filepath, exptime, seqnum, **kwargs)\n self.image_taken.emit(1)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken\".format(mode))\n except IOError:\n self.logger.exception(\"File already exits. Image not taken.\")\n else:\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.seqnum_inc.emit(seqnum)\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename,\n '-zoom', 'to', 'fit', '-cmap', 'b'])\n\n ## Check if a stack of exposures of same type\n elif exptype in [\"Exposure Stack\", \"Dark Stack\", \"Bias Stack\"]:\n\n total = seqnum + imcount\n self.logger.info(\"Starting {0}s {1} stack.\".format(exptime, exptype))\n self.image_start.emit(imcount)\n\n try:\n for i in range(seqnum, total):\n self.logger.info(\"Starting image {0} of {1}.\".format(i+1-seqnum, imcount))\n filename = exposure.im_acq(mode, filepath, exptime, i, **kwargs)\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.image_taken.emit(i+1-seqnum)\n self.seqnum_inc.emit(i)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. 
Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken.\".format(mode))\n except IOError:\n self.logger.exception(\"File already exists. Image not taken.\")\n else:\n self.logger.info(\"Exposure stack finished successfully.\")\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename, '-zoom', 'to', 'fit', '-cmap', 'b'])\n \n ## Check if a series of exposures of increase exposure time\n elif exptype in [\"Exposure Series\", \"Dark Series\"]:\n\n ## Parameter checks\n if mintime > maxtime:\n self.logger.warning(\"Minimum time must be less than Maximum time. Series not started.\")\n return\n elif step <= 0:\n self.logger.warning(\"Time step must be greater than 0. Series not started.\")\n return\n\n ## Construct array of exposure times\n t = mintime\n time_array = []\n while t <= maxtime:\n time_array.append(t)\n t += step\n \n ## Perform series\n self.logger.info(\"Starting {0} series with mintime {1}, maxtime {2}, and step {3}.\".format(exptype, mintime, maxtime, step))\n self.image_start.emit(len(time_array))\n \n try:\n for i, time in enumerate(time_array):\n self.logger.info(\"Starting {0}s {1} image.\".format(time, mode))\n filename = exposure.im_acq(mode, filepath, time, seqnum, **kwargs)\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.image_taken.emit(i+1)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken.\".format(mode))\n except IOError:\n self.logger.exception(\"File already exists. Image not taken.\")\n else:\n self.logger.info(\"Exposure series finished successfully.\")\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename, '-zoom', 'to', 'fit', '-cmap', 'b'])\n self.seqnum_inc.emit(seqnum)", "def occupancy_grid(self, point_cloud=None, ss_pred=None, *, return_image=False, show=False, keep_showing=False):\n\t\tif point_cloud is None or ss_pred is None:\n\t\t\tgen = self.loop(point_cloud=True, ss_pred=True)\n\t\t\tpoint_cloud, ss_pred = next(gen)\n\t\t#TODO: Check that this function works with ZED camera\n\t\tpass", "def create_image(image_url, owner, permission=\"PRIVATE\"):\n\n image = Image(image_url=image_url,\n owner=owner,\n permission=permission)\n \n db.session.add(image)\n db.session.commit()\n return image", "def authorize(context, input_mock, getpass_mock, image_open_mock):\n with mock_login_requests(context):\n with mock_request(context, FileMock().security_image_gif):\n mock_func = ContextXmlMock(context).paychex_account_data\n with mock_request(context, mock_func):\n getpass_mock.return_value = context.password\n input_mock.return_value = context.security_answer\n arguments = {\n 'authorize': True,\n '--config': context.config_file,\n '<username>': context.username\n }\n pychex_cli = PychexCli(arguments)\n input_mock.assert_called_once_with(\n 'Is this your security image (Y/n)? 
')\n assert image_open_mock.call_count == 1\n if context.security_answer in ['yes', 'y', 'ye', '']:\n getpass_mock.assert_called_once_with(\n 'Password (input hidden): ')\n assert pychex_cli.username == context.username", "def test_photo_classification_view_set_get_public(self):\n # Test data\n user = account_models.User.objects.get(email='mrtest@mypapaya.io')\n\n photo_models.PhotoClassification.objects.create_or_update(name='City', public=False)\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n request = client.get('/api/photo_classifications')\n results = request.data['results']\n\n self.assertEquals(len(results), 13)", "def test_user_visibility(app, resource):\n nethz = 'Something'\n with app.user(nethz=nethz):\n # Create fake signup with different nethz\n own = str(app.data.driver.db[resource].insert({'nethz': nethz}))\n other = str(app.data.driver.db[resource].insert({'nethz': 'trolo'}))\n\n # Resource: Can only see own, not both signups\n response = app.client.get('/' + resource, assert_status=200)\n assert len(response['_items']) == 1\n assert response['_items'][0]['nethz'] == nethz\n\n # Items\n own_url = '/%s/%s' % (resource, own)\n other_url = '/%s/%s' % (resource, other)\n\n # Get\n app.client.get(own_url, assert_status=200)\n app.client.get(other_url, assert_status=404)\n\n # Patch (if we can see item, we get 428 since etag is missing)\n app.client.patch(own_url, data={}, assert_status=428)\n app.client.patch(other_url, data={}, assert_status=404)\n\n # Delete (etag missing again)\n app.client.delete(own_url, assert_status=428)\n app.client.delete(other_url, assert_status=404)", "def IsVisible(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_IsVisible(self, *args)", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def image_check(kwargs) -> bool:\n\n # Kwarg argument check\n return kwarg_check(\n kwargs=kwargs,\n options=[\n \"min_captured_at\",\n \"max_captured_at\",\n \"radius\",\n \"image_type\",\n \"organization_id\",\n \"fields\",\n ],\n callback=\"image_check\",\n )", "def create_image_allowed(self):\n return self._create_image_allowed", "def get_images_by_vulnerability(self, **kwargs):\n ...", "def get_public_crest_context(self):\n\n # use anonymous PyCrest as documented at http://pycrest.readthedocs.org/\n public_crest = pycrest.EVE()\n public_crest()\n\n tranquility_user_count = public_crest.userCounts.eve\n\n # fetch incursions and make them usable inside a Django template\n incursions = []\n for thing_that_looks_like_a_dict_but_isnt in public_crest.incursions().items:\n incursion = {}\n for key, value in thing_that_looks_like_a_dict_but_isnt._dict.iteritems():\n incursion[key] = value._dict if hasattr(value, '_dict') else value\n incursions.append(incursion)\n return {\n 'user_count': tranquility_user_count,\n 'incursions': incursions,\n }", "def is_private():", "def test_create_activity_as_context_check_ownership(self):\n from .mockers import user_status_as_context\n from .mockers import create_context\n from hashlib import sha1\n self.create_context(create_context)\n url_hash = 
sha1(create_context['url']).hexdigest()\n res = self.testapp.post('/contexts/%s/activities' % url_hash, json.dumps(user_status_as_context), oauth2Header(test_manager), status=201)\n self.assertEqual(res.json['actor']['hash'], url_hash)\n self.assertEqual(res.json['creator'], test_manager)\n self.assertEqual(res.json['owner'], test_manager)", "def contextInfo(*args, c: bool=True, escapeContext: bool=True, exists: bool=True, image1:\n bool=True, image2: bool=True, image3: bool=True, title: bool=True, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def test_api_thumbnail_retrieve_by_organization_administrator(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=ADMINISTRATOR,\n )\n\n self.assert_user_can_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def check_for_exposed(context):\n json_data = context.response.json()\n if \"exploitable_vulnerabilities_count\" in json_data:\n raise Exception(\"Field exploitable_vulnerabilities_count Exposed in\"\n \" Free user result\")\n if \"vendor_package_link\" in json_data:\n raise Exception(\"Field vendor_package_link has been exposed for free user\")", "def is_obj_visible(obj, scene, context=None, is_dupli=False):\r\n if is_dupli:\r\n return True\r\n\r\n # Mimic Blender behaviour: if object is duplicated via a parent, it should be invisible\r\n if obj.parent and obj.parent.dupli_type != \"NONE\":\r\n return False\r\n\r\n # Check if object is used as camera clipping plane\r\n if is_valid_camera(scene.camera) and obj == scene.camera.data.luxcore.clipping_plane:\r\n return False\r\n\r\n render_layer = get_current_render_layer(scene)\r\n if render_layer:\r\n # We need the list of excluded layers in the settings of this render layer\r\n exclude_layers = render_layer.layers_exclude\r\n else:\r\n # We don't account for render layer visiblity in viewport/preview render\r\n # so we create a mock list here\r\n exclude_layers = [False] * 20\r\n\r\n on_visible_layer = False\r\n for lv in [ol and sl and not el for ol, sl, el in zip(obj.layers, scene.layers, exclude_layers)]:\r\n on_visible_layer |= lv\r\n\r\n hidden_in_outliner = obj.hide if context else obj.hide_render\r\n return on_visible_layer and not hidden_in_outliner", "def check_for_no_privates(context):\n json_data = context.response.json()\n\n if \"component_analyses\" in json_data:\n vulnerabilities = json_data['component_analyses']['vulnerability']\n for v in vulnerabilities:\n assert \"cvss\" in v\n assert \"is_private\" in v\n assert \"vendor_cve_ids\" in v\n if v[\"is_private\"]:\n raise Exception(\"Private vulnerability found\")", "def IsInstanceVisible(self, *args):\n return _XCAFDoc.XCAFDoc_ColorTool_IsInstanceVisible(self, *args)", "def test_visibility(self):\r\n self.assertFalse(self.net.environment\\\r\n .are_visible(self.net.pos[self.node1],\r\n self.net.pos[self.node2]))\r\n self.assertTrue(self.net.environment\\\r\n .are_visible(self.net.pos[self.node2],\r\n self.net.pos[self.node3]))", "def create_conv(obs,\n image_height,\n image_width,\n image_channels,\n ignore_flat_channels,\n ignore_image,\n filters,\n kernel_sizes,\n strides,\n act_fun,\n layer_norm,\n scope=None,\n reuse=False):\n with tf.compat.v1.variable_scope(scope, reuse=reuse):\n batch_size = tf.shape(obs)[0]\n image_size = image_height * image_width * image_channels\n\n original_pi_h = obs\n pi_h = original_pi_h[:, image_size:]\n\n ignored_indx = [\n i for i in range(pi_h.shape[1]) if i not in 
ignore_flat_channels]\n\n if len(ignored_indx) > 0:\n pi_h_ignored = tf.gather(pi_h, ignored_indx, axis=1)\n\n # Ignoring the image is useful for the lower level for creating an\n # abstraction barrier.\n if not ignore_image:\n pi_h_image = tf.reshape(\n original_pi_h[:, :image_size],\n [batch_size, image_height, image_width, image_channels]\n )\n\n # Create the hidden convolutional layers.\n for i, (filter_i, kernel_size_i, stride_i) in enumerate(zip(\n filters, kernel_sizes, strides)):\n pi_h_image = conv_layer(\n pi_h_image,\n filter_i,\n kernel_size_i,\n stride_i,\n 'conv{}'.format(i),\n act_fun=act_fun,\n layer_norm=layer_norm\n )\n\n h = pi_h_image.shape[1]\n w = pi_h_image.shape[2]\n c = pi_h_image.shape[3]\n pi_h = tf.concat(\n [tf.reshape(pi_h_image, [batch_size, h * w * c]) /\n tf.cast(h * w * c, tf.float32),\n pi_h], 1\n )\n if len(ignored_indx) > 0:\n pi_h = tf.concat([pi_h, pi_h_ignored], 1)\n\n return pi_h", "def expose(self, cmd, expTime, expType):\n\n if not expType:\n expType = 'test'\n if cmd:\n cmd.inform('exposureState=\"exposing\"')\n if expType not in ('bias', 'test') and expTime > 0:\n time.sleep(expTime + self._exposureOverheadTime())\n\n if cmd:\n cmd.inform('exposureState=\"reading\"')\n\n f = pyfits.open('/home/chyan/mhs/data/mcs/schmidt_fiber_snr400_rmod71.fits')\n image = f[0].data\n # image = numpy.random.normal(self.biasLevel,\n # scale=self.readNoise,\n # size=self.imageSize).astype('u2')\n\n if expType != 'test':\n time.sleep(self._readoutTime())\n return image", "def _evalContext(self):\n def xor(*args):\n return sum(args) == 1\n def neg(result):\n return not result\n context = {\n 'xor': xor,\n 'neg': neg\n }\n return context", "def get_instance_image(img, bbox, size_z, size_x, context_amount, img_mean=None):\n cx, cy, w, h = xyxy2cxcywh(bbox)\n wc_z = w + context_amount * (w+h)\n hc_z = h + context_amount * (w+h)\n s_z = np.sqrt(wc_z * hc_z)\n scale_z = size_z / s_z\n d_search = (size_x - size_z) / 2\n pad = d_search / scale_z\n s_x = s_z + 2 * pad\n scale_x = size_x / s_x\n instance_img = crop_and_pad(img, cx, cy, size_x, s_x, img_mean)\n return instance_img, scale_x, s_x", "def create_octree_image() -> bool:\n return async_octree and (create_image_type != CREATE_IMAGE_NORMAL)", "def _is_task_visible(context, task):\n # Is admin == task visible\n if context.is_admin:\n return True\n\n # No owner == task visible\n if task['owner'] is None:\n return True\n\n # Perform tests based on whether we have an owner\n if context.owner is not None:\n if context.owner == task['owner']:\n return True\n\n return False", "def test_get_activities_does_not_show_private_fields(self):\n from .mockers import context_query\n from .mockers import user_status_context\n from .mockers import subscribe_context, create_context\n username = 'messi'\n self.create_user(username)\n self.create_context(create_context)\n self.admin_subscribe_user_to_context(username, subscribe_context)\n self.create_activity(username, user_status_context)\n res = self.testapp.get('/contexts/%s/activities' % (context_query['context']), '', oauth2Header(username), status=200)\n self.assertEqual(len(res.json), 1)\n self.assertNotIn('_keywords', res.json[0]['object'])", "def horizon_servers_with_private_image(request, cirros_image_private,\n security_group, net_subnet_router,\n flavor_steps, server_steps):\n count = int(getattr(request, 'param', 3))\n network, _, _ = net_subnet_router\n flavor = flavor_steps.get_flavor(name=config.HORIZON_TEST_FLAVOR_TINY)\n return 
server_steps.create_servers(image=cirros_image_private,\n flavor=flavor,\n count=count,\n networks=[network],\n security_groups=[security_group],\n username=config.CIRROS_USERNAME,\n password=config.CIRROS_PASSWORD)", "def test_document_not_publicly_accessible(self):\n document = DocumentFactory(\n id=\"301b5f4f-b9f1-4a5f-897d-f8f1bf22c396\",\n playlist__title=\"playlist-003\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n is_public=False,\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n uploaded_on=\"2019-09-24 07:24:40+00\",\n )\n\n response = self.client.get(f\"/documents/{document.pk}\")\n\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"<html>\")\n content = response.content.decode(\"utf-8\")\n\n match = re.search(\n '<div id=\"marsha-frontend-data\" data-context=\"(.*)\">', content\n )\n\n context = json.loads(unescape(match.group(1)))\n\n self.assertIsNone(context.get(\"resource\"))\n self.assertEqual(context.get(\"state\"), \"error\")\n self.assertEqual(context.get(\"modelName\"), \"documents\")", "def _create_request_context(\n self,\n url_name: str = 'review-request-detail',\n public: bool = True,\n user: Optional[User] = None,\n *args,\n **kwargs,\n ) -> Context:\n review_request = self.create_review_request(public=public)\n\n return Context({\n 'request': self.create_http_request(\n user=user or review_request.submitter,\n url_name=url_name),\n 'review_request': review_request,\n })", "def view2dToolCtx(*args, alternateContext: bool=True, boxzoom: bool=True, dolly: bool=True,\n exists: bool=True, history: bool=True, image1: Union[AnyStr, bool]=\"\",\n image2: Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\", name:\n AnyStr=\"\", toolName: Union[AnyStr, bool]=\"\", track: bool=True, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def directKeyCtx(*args, exists: bool=True, history: bool=True, image1: Union[AnyStr, bool]=\"\",\n image2: Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\", name:\n AnyStr=\"\", option: Union[AnyStr, bool]=\"\", selectedOnly: bool=True, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def test_private(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_public\": False, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"private\"})", "def object_detection(self, img=None, *, return_image=False, show=False, keep_showing=False):\n\t\tif img is None:\n\t\t\timg, _ = self.capture()\n\n\t\tpred_bbox = self.od_model.predict(img)\n\n\t\tif show or return_image:\n\t\t\tpred_img = self.od_model.show_on_image(img, pred_bbox, show, keep_showing)\n\n\t\tif return_image:\n\t\t\treturn pred_bbox, pred_img\n\t\telse:\n\t\t\treturn pred_bbox", "def effector(*args, hide: bool=True, name: Union[AnyStr, bool]=\"\", q=True, query=True, e=True,\n edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def check_for_private_vul(context):\n json_data = context.response.json()\n\n if \"component_analyses\" in json_data:\n vulnerabilities = json_data['component_analyses']['vulnerability']\n for v in vulnerabilities:\n if v[\"is_private\"]:\n return\n raise Exception(\"No private vulnerability found\")", "def test_update_asset_visibility_query(self):\n pass", "def show_result(test_image, pred_mask):\n if (type(test_image) == torch.Tensor):\n test_image = transforms.ToPILImage()(test_image)\n else:\n test_image = Image.fromarray(test_image)\n new_mask = pred_mask\n grid = 
find_grid(Smooth(new_mask),new_mask)\n rectangles = find_rect(grid,np.asarray(test_image))\n grid_dewarped = create_image(rectangles)\n pixel_dewarped = Image.fromarray(pixel_remap(np.asarray(test_image),new_mask))\n return (grid_dewarped, pixel_dewarped)", "def generate(self, image, label, **kwargs):\n\n\n #check type device\n assert self.check_type_device(image, label)\n is_cuda = torch.cuda.is_available()\n\n if (is_cuda and self.device == 'cuda'):\n self.image = image.cuda()\n self.model = self.model.cuda()\n else:\n self.image = image\n\n assert self.parse_params(**kwargs)\n\n adv_img, self.r, self.ite = deepfool(self.model,\n self.image,\n self.num_classes,\n self.overshoot,\n self.max_iteration,\n self.device)\n return adv_img", "def get_image_vulnerabilities(self, **kwargs) -> ImageVulnerabilitiesReport:\n ...", "def paramDimContext(*args, exists: bool=True, history: bool=True, image1: Union[AnyStr,\n bool]=\"\", image2: Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\",\n name: AnyStr=\"\", q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def showManipCtx(*args, currentNodeName: bool=True, exists: bool=True, history: bool=True,\n image1: Union[AnyStr, bool]=\"\", image2: Union[AnyStr, bool]=\"\", image3:\n Union[AnyStr, bool]=\"\", incSnap: Union[List[int, bool], List[List[int,\n bool]]]=None, incSnapRelative: Union[List[int, bool], List[List[int,\n bool]]]=None, incSnapUI: bool=True, incSnapValue: Union[List[int, float],\n List[List[int, float]], bool]=None, lockSelection: bool=True, name: AnyStr=\"\",\n toggleIncSnap: bool=True, toolFinish: Union[Script, bool]=None, toolStart:\n Union[Script, bool]=None, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def check_joints2d_visibility_torch(joints2d,\n img_wh,\n vis=None):\n if vis is None:\n vis = torch.ones(joints2d.shape[:2], device=joints2d.device, dtype=torch.bool)\n vis[joints2d[:, :, 0] > img_wh] = 0\n vis[joints2d[:, :, 1] > img_wh] = 0\n vis[joints2d[:, :, 0] < 0] = 0\n vis[joints2d[:, :, 1] < 0] = 0\n\n return vis", "def guest_access(func):\n def decorated(*_, **kwargs):\n public_profiles = current_app.config['USER_PUBLIC_PROFILES']\n if not public_profiles:\n if not current_user.is_authenticated:\n abort(401)\n elif current_user.id != kwargs['id']:\n abort(403)\n return func(**kwargs)\n\n return decorated", "def glance_list_owned_public_images(glance, owner_id, image_info):\n\n images = []\n list_kwargs = {'filters': {'visibility': 'public', 'owner': owner_id}}\n public_owned_images = glance.images.list(**list_kwargs)\n for image in public_owned_images:\n # only images with the \"same\" name ('TOTO' matches 'test_TOTO' or 'TOTO - 2016-10-03')\n if image_info['image_name'] in image.name:\n images.append(image)\n return images", "def _fake_only_visualize(fake_img, real_img, caption, num, num_per_caption, num_per_row, model):\n fake_img = _post_process(fake_img, model)\n grid = _make_grid(fake_img, cols=num_per_row)\n grid = tf.convert_to_tensor(grid, dtype=tf.uint8)\n return fake_img, grid", "def setEditCtx(*args, exists: bool=True, history: bool=True, image1: Union[AnyStr, bool]=\"\",\n image2: Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\", name: AnyStr=\"\",\n q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def srtContext(*args, exists: bool=True, history: bool=True, image1: Union[AnyStr, bool]=\"\",\n image2: Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\", name: AnyStr=\"\",\n q=True, 
query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def test_should_render_for_owner(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context(\n can_edit_reviewrequest=False)))", "def isVisible(self, p_int): # real signature unknown; restored from __doc__\n return False", "def isVisible(self, p_int): # real signature unknown; restored from __doc__\n return False", "def preview(context):\n command = (\n f\"docker run -t \"\n f\"-e INPUT_LEANPUB-API-KEY={LEANPUB_API_KEY} \"\n f\"-e INPUT_LEANPUB-BOOK-SLUG={LEANPUB_BOOK_SLUG} \"\n f\"-e INPUT_PREVIEW=true\"\n f\"{IMAGE_NAME}:{IMAGE_VER}\"\n )\n # print(f\"{command}\") # Commenting out as this can print secrets\n context.run(f\"{command}\", pty=True)", "def selectContext(*args, exists: bool=True, history: bool=True, image1: Union[AnyStr, bool]=\"\",\n image2: Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\", name:\n AnyStr=\"\", q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr,\n Any]:\n pass", "def compute_object_visibility(obj, cam, N=25, scene=None, view_layer=None, dist=None):\n scene = scene or bpy.context.scene\n vl = view_layer or bpy.context.view_layer\n src = cam.bpy_camera.matrix_world.translation\n dist = dist or 1.70141e38\n\n caminv = cam.bpy_camera.matrix_world.inverted()\n\n ids = np.random.choice(len(obj.data.vertices), size=N)\n vis = 0\n for idx in ids:\n dst_world = obj.matrix_world @ obj.data.vertices[idx].co\n d = (dst_world - src).normalized()\n dst_cam = caminv @ dst_world\n if dst_cam.z <= 0.0 and np.isfinite(d).all(): # view towards neg. z\n res, x, n, face, object, m = scene.ray_cast(vl, src, d, distance=dist)\n if res and object == obj:\n vis += 1\n del object, m, x, n, res\n del d, dst_world, dst_cam\n return vis / N", "def image(request, img_id):\n image = Image.objects.get(pk=img_id)\n if request.user.is_staff or image.is_approved:\n comments = ImageComment.objects.filter(image_id=img_id).order_by('-submission_date')\n comments_and_votes = Vote.objects.get_weighted_scores_in_bulk(comments, request.user)\n\n ctx = {\"img\":image,\n \"comments_and_votes\":comments_and_votes,\n \"image_tags\":image.tags.all(),\n \"all_tags\":Tag.objects.all(),\n \"site\":get_current_site(request)\n }\n return render_to_response('wainz/image.html', ctx , context_instance = RequestContext(request))\n else:\n return HttpResponseRedirect(reverse('wainz.views.composite'))", "def test_public_status_page_add_authorization_to_public_status_page(self):\n pass", "def media_image_remotely_accessible(self) -> bool:\n return True" ]
[ "0.55633384", "0.54331094", "0.5389461", "0.5342315", "0.5338732", "0.53361624", "0.5331995", "0.53111786", "0.5263905", "0.519634", "0.5192311", "0.5132145", "0.51142937", "0.5112023", "0.50558454", "0.49886537", "0.49874082", "0.49178144", "0.47592", "0.4705346", "0.46915755", "0.46870592", "0.46700382", "0.46570787", "0.4648981", "0.4646109", "0.46335655", "0.46271664", "0.4621221", "0.46063378", "0.45997745", "0.4582135", "0.4581263", "0.4574506", "0.45634794", "0.45626977", "0.45533672", "0.45283917", "0.45031583", "0.449603", "0.449603", "0.449603", "0.449603", "0.449603", "0.449603", "0.449603", "0.449603", "0.449603", "0.449603", "0.449603", "0.44917893", "0.44894308", "0.44819766", "0.4481675", "0.44788027", "0.4467072", "0.446355", "0.4459468", "0.44593152", "0.4458453", "0.44437546", "0.44363925", "0.44344053", "0.4433204", "0.44288683", "0.44279662", "0.4423705", "0.44217536", "0.44195136", "0.44177127", "0.44074026", "0.44017872", "0.440095", "0.43919104", "0.4391687", "0.43889353", "0.43753242", "0.43682688", "0.43625298", "0.43608287", "0.43586978", "0.43569753", "0.43514088", "0.43477833", "0.43411252", "0.4337237", "0.43340015", "0.4325703", "0.4322179", "0.43200365", "0.43180093", "0.43106073", "0.4308606", "0.4308606", "0.43039903", "0.42910078", "0.42829415", "0.42802233", "0.42790064", "0.42750356" ]
0.7743895
0
Perform a context sharability test. Create a (fake) image with the specified owner and is_public attributes, then create a context with the given keyword arguments and expect exp_res as the result of an is_image_sharable() call on the context. If membership is not None, its value will be passed in as the 'membership' keyword argument of is_image_sharable().
def do_sharable(self, exp_res, img_owner, membership=None, **kwargs):
    img = FakeImage(img_owner, True)
    ctx = context.RequestContext(**kwargs)
    sharable_args = {}
    if membership is not None:
        sharable_args['membership'] = membership
    self.assertEqual(ctx.is_image_sharable(img, **sharable_args), exp_res)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_auth_sharable_can_share(self):\n self.do_sharable(True, 'pattieblack', FakeMembership(True),\n tenant='froggy')", "def do_visible(self, exp_res, img_owner, img_public, **kwargs):\n\n img = FakeImage(img_owner, img_public)\n ctx = context.RequestContext(**kwargs)\n\n self.assertEqual(ctx.is_image_visible(img), exp_res)", "def test_anon_shared(self):\n self.do_sharable(False, 'pattieblack', None)\n self.do_sharable(False, 'pattieblack', FakeMembership(True))", "def test_auth_sharable_owned(self):\n self.do_sharable(True, 'pattieblack', None, tenant='pattieblack')", "def test_auth_sharable_cannot_share(self):\n self.do_sharable(False, 'pattieblack', FakeMembership(False),\n tenant='froggy')", "def test_empty_shared(self):\n self.do_sharable(False, 'pattieblack', None, is_admin=True)\n self.do_sharable(False, 'pattieblack', FakeMembership(True),\n is_admin=True)", "async def shrug(self,ctx,user: discord.Member=None):\n if user == None or user.id == ctx.author.id:\n await ctx.send(\"{}\".format(ctx.author.mention))\n else:\n await ctx.send(\"{} {}\".format(ctx.author.mention, user.mention))\n img = random.choice(self.getreaction(\"shrug\", \"0\"))\n embed = discord.Embed(colour=ctx.guild.me.top_role.colour)\n embed.set_image(url=img)\n await ctx.send(embed=embed)", "def test_auth_sharable(self):\n self.do_sharable(False, 'pattieblack', None, tenant='froggy')", "def builder_should_create_target_image(self, builder, target, image_id, template, parameters):", "def create_image_from_visibility(vis, **kwargs) -> Image:\n assert isinstance(vis, Visibility) or isinstance(vis, BlockVisibility), \\\n \"vis is not a Visibility or a BlockVisibility: %r\" % (vis)\n \n log.debug(\"create_image_from_visibility: Parsing parameters to get definition of WCS\")\n \n imagecentre = get_parameter(kwargs, \"imagecentre\", vis.phasecentre)\n phasecentre = get_parameter(kwargs, \"phasecentre\", vis.phasecentre)\n \n # Spectral processing options\n ufrequency = numpy.unique(vis.frequency)\n vnchan = len(ufrequency)\n \n frequency = get_parameter(kwargs, \"frequency\", vis.frequency)\n inchan = get_parameter(kwargs, \"nchan\", vnchan)\n reffrequency = frequency[0] * units.Hz\n channel_bandwidth = get_parameter(kwargs, \"channel_bandwidth\", 0.99999999999 * vis.channel_bandwidth[0]) * units.Hz\n \n if (inchan == vnchan) and vnchan > 1:\n log.debug(\n \"create_image_from_visibility: Defining %d channel Image at %s, starting frequency %s, and bandwidth %s\"\n % (inchan, imagecentre, reffrequency, channel_bandwidth))\n elif (inchan == 1) and vnchan > 1:\n assert numpy.abs(channel_bandwidth.value) > 0.0, \"Channel width must be non-zero for mfs mode\"\n log.debug(\"create_image_from_visibility: Defining single channel MFS Image at %s, starting frequency %s, \"\n \"and bandwidth %s\"\n % (imagecentre, reffrequency, channel_bandwidth))\n elif inchan > 1 and vnchan > 1:\n assert numpy.abs(channel_bandwidth.value) > 0.0, \"Channel width must be non-zero for mfs mode\"\n log.debug(\"create_image_from_visibility: Defining multi-channel MFS Image at %s, starting frequency %s, \"\n \"and bandwidth %s\"\n % (imagecentre, reffrequency, channel_bandwidth))\n elif (inchan == 1) and (vnchan == 1):\n assert numpy.abs(channel_bandwidth.value) > 0.0, \"Channel width must be non-zero for mfs mode\"\n log.debug(\"create_image_from_visibility: Defining single channel Image at %s, starting frequency %s, \"\n \"and bandwidth %s\"\n % (imagecentre, reffrequency, channel_bandwidth))\n else:\n raise 
ValueError(\"create_image_from_visibility: unknown spectral mode \")\n \n # Image sampling options\n npixel = get_parameter(kwargs, \"npixel\", 512)\n uvmax = numpy.max((numpy.abs(vis.data['uvw'][:, 0:1])))\n if isinstance(vis, BlockVisibility):\n uvmax *= numpy.max(frequency) / constants.c.to('m s^-1').value\n log.debug(\"create_image_from_visibility: uvmax = %f wavelengths\" % uvmax)\n criticalcellsize = 1.0 / (uvmax * 2.0)\n log.debug(\"create_image_from_visibility: Critical cellsize = %f radians, %f degrees\" % (\n criticalcellsize, criticalcellsize * 180.0 / numpy.pi))\n cellsize = get_parameter(kwargs, \"cellsize\", 0.5 * criticalcellsize)\n log.debug(\"create_image_from_visibility: Cellsize = %g radians, %g degrees\" % (cellsize,\n cellsize * 180.0 / numpy.pi))\n override_cellsize = get_parameter(kwargs, \"override_cellsize\", True)\n if override_cellsize and cellsize > criticalcellsize:\n log.debug(\"create_image_from_visibility: Resetting cellsize %g radians to criticalcellsize %g radians\" % (\n cellsize, criticalcellsize))\n cellsize = criticalcellsize\n pol_frame = get_parameter(kwargs, \"polarisation_frame\", PolarisationFrame(\"stokesI\"))\n inpol = pol_frame.npol\n \n # Now we can define the WCS, which is a convenient place to hold the info above\n # Beware of python indexing order! wcs and the array have opposite ordering\n shape = [inchan, inpol, npixel, npixel]\n log.debug(\"create_image_from_visibility: image shape is %s\" % str(shape))\n w = wcs.WCS(naxis=4)\n # The negation in the longitude is needed by definition of RA, DEC\n w.wcs.cdelt = [-cellsize * 180.0 / numpy.pi, cellsize * 180.0 / numpy.pi, 1.0, channel_bandwidth.to(units.Hz).value]\n # The numpy definition of the phase centre of an FFT is n // 2 (0 - rel) so that's what we use for\n # the reference pixel. 
We have to use 0 rel everywhere.\n w.wcs.crpix = [npixel // 2 + 1, npixel // 2 + 1, 1.0, 1.0]\n w.wcs.ctype = [\"RA---SIN\", \"DEC--SIN\", 'STOKES', 'FREQ']\n w.wcs.crval = [phasecentre.ra.deg, phasecentre.dec.deg, 1.0, reffrequency.to(units.Hz).value]\n w.naxis = 4\n \n # TODO: Why is this check being done?\n # direction_centre = pixel_to_skycoord(npixel // 2 + 1, npixel // 2 + 1, wcs=w, origin=1)\n # assert direction_centre.separation(imagecentre).value < 1e-7, \\\n # \"Image phase centre [npixel//2, npixel//2] should be %s, actually is %s\" % \\\n # (str(imagecentre), str(direction_centre))\n \n w.wcs.radesys = get_parameter(kwargs, 'frame', 'ICRS')\n w.wcs.equinox = get_parameter(kwargs, 'equinox', 2000.0)\n \n return create_image_from_array(numpy.zeros(shape), wcs=w, polarisation_frame=pol_frame)", "async def thumbsup(self,ctx,user: discord.Member=None):\n if user == None or user.id == ctx.author.id:\n await ctx.send(\"{}\".format(ctx.author.mention))\n else:\n await ctx.send(\"{} {}\".format(ctx.author.mention, user.mention))\n img = random.choice(self.getreaction(\"thumbsup\", \"0\"))\n embed = discord.Embed(colour=ctx.guild.me.top_role.colour)\n embed.set_image(url=img)\n await ctx.send(embed=embed)", "def maketestimage(self, *args, **kwargs):\n return _image.image_maketestimage(self, *args, **kwargs)", "def export_prepared_image(self, **kwargs):\n owner = kwargs.pop(\"owner\", None)\n indent = kwargs.pop(\"indent\", 2)\n key = _Texture(**kwargs)\n image = key.image\n\n if key not in self._pending:\n self._report.msg(\"Stashing '{}' for conversion as '{}'\", image.name, key, indent=indent)\n self._pending[key] = [owner.key,]\n else:\n self._report.msg(\"Found another user of '{}'\", key, indent=indent)\n self._pending[key].append(owner.key)", "def testImageHandling(self):\n \n pm = getToolByName(self.portal, 'portal_membership')\n #make sure the person's member portrait isn't defined\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n # Delete the (nonexistant) image, make sure the portrait stays undefined\n self.person.setImage('DELETE_IMAGE')\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n self.person.setImage(TEST_GIF, content_type=\"image/gif\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_GIF)\n # Try to get a 10x10 version of the image\n imageOfSizeTag = self.person.getImageOfSize(10, 10)\n self.failUnlessEqual(imageOfSizeTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"10\" width=\"10\" />')\n self.failUnlessEqual(pm.getPersonalPortrait('abc123').__name__, 'abc123')\n \n # Try to get a scaled-by-ratio image with a width of 100.\n scaledImageTag = self.person.getScaledImageByWidth(100)\n self.failUnlessEqual(scaledImageTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"150\" width=\"100\" />')\n \n # Delete the image, make sure the portrait is deleted as well\n self.person.setImage('DELETE_IMAGE')\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n #self.person.setImage(TEST_JPEG, content_type=\"image/jpeg\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_JPEG)\n \n self.person.setImage(TEST_TIFF, content_type=\"image/tiff\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_TIFF)\n # Try to get a 10x10 version of the image\n 
imageOfSizeTag = self.person.getImageOfSize(10, 10)\n self.failUnlessEqual(imageOfSizeTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"10\" width=\"10\" />')\n \n # Try to get a scaled-by-ratio image with a width of 100.\n # TIFF handling in Plone is broken (probably the fault of PIL), handle the problem nicely.\n scaledImageTag = self.person.getScaledImageByWidth(100)\n self.failUnless(scaledImageTag == '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"150\" width=\"100\" />' or scaledImageTag == '')", "def test_api_thumbnail_retrieve_by_organization_student(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=STUDENT,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "async def cry(self,ctx,user: discord.Member=None):\n if user == None or user.id == ctx.author.id:\n await ctx.send(\"{}\".format(ctx.author.mention))\n else:\n await ctx.send(\"{} {}\".format(ctx.author.mention, user.mention))\n img = random.choice(self.getreaction(\"cry\", \"0\"))\n embed = discord.Embed(colour=ctx.guild.me.top_role.colour)\n embed.set_image(url=img)\n await ctx.send(embed=embed)", "def create_image(image_url, owner, permission=\"PRIVATE\"):\n\n image = Image(image_url=image_url,\n owner=owner,\n permission=permission)\n \n db.session.add(image)\n db.session.commit()\n return image", "def create_image_allowed(self, create_image_allowed):\n self._create_image_allowed = create_image_allowed", "def test_public_user(self):\n set_permission(Permission.SHARE, self.user1, self.collection)\n\n data = {\"public\": \"view\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"none\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"edit\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"share\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"owner\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def test_auth_sharable_admin(self):\n self.do_sharable(True, 'pattieblack', None, tenant='froggy',\n is_admin=True)", "def get_images_by_vulnerability(self, **kwargs):\n ...", "def test_create_image(self):\n pass", "async def smug(self,ctx,user: discord.Member=None):\n if user == None or user.id == ctx.author.id:\n await ctx.send(\"{}\".format(ctx.author.mention))\n else:\n await ctx.send(\"{} {}\".format(ctx.author.mention, user.mention))\n img = random.choice(self.getreaction(\"smug\", \"0\"))\n embed = discord.Embed(colour=ctx.guild.me.top_role.colour)\n embed.set_image(url=img)\n await ctx.send(embed=embed)", "def targetWeldCtx(*args, exists: bool=True, image1: Union[AnyStr, bool]=\"\", image2:\n Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\", mergeToCenter:\n bool=True, q=True, query=True, e=True, edit=True, **kwargs)->Union[None,\n Any]:\n pass", "def srtContext(*args, exists: bool=True, history: bool=True, image1: Union[AnyStr, bool]=\"\",\n 
image2: Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\", name: AnyStr=\"\",\n q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def prepared_image_file(create_filesystem=True):\n # Create a 10 MB image file and a key file of 2048 bytes.\n execute('dd', 'if=/dev/zero', 'of=%s' % IMAGE_FILE, 'bs=1M', 'count=10')\n execute('dd', 'if=/dev/urandom', 'of=%s' % KEY_FILE, 'bs=512', 'count=4')\n # Encrypt and unlock the image file.\n execute('cryptsetup', '--batch-mode', 'luksFormat', IMAGE_FILE, KEY_FILE, sudo=True)\n # Create a filesystem on the encrypted image file?\n if create_filesystem:\n with unlocked_device(CRYPTO_NAME):\n execute('mkfs.ext4', FILESYSTEM_DEVICE, sudo=True)\n yield\n os.unlink(IMAGE_FILE)\n os.unlink(KEY_FILE)", "def test_compute_image_sharpness(self):\n yuv_full_scale = 1023.0\n chart_file = os.path.join(os.environ['CAMERA_ITS_TOP'], 'pymodules',\n 'its', 'test_images', 'ISO12233.png')\n chart = cv2.imread(chart_file, cv2.IMREAD_ANYDEPTH)\n white_level = numpy.amax(chart).astype(float)\n sharpness = {}\n for j in [2, 4, 8]:\n blur = cv2.blur(chart, (j, j))\n blur = blur[:, :, numpy.newaxis]\n sharpness[j] = (yuv_full_scale *\n its.image.compute_image_sharpness(blur /\n white_level))\n self.assertTrue(numpy.isclose(sharpness[2]/sharpness[4],\n numpy.sqrt(2), atol=0.1))\n self.assertTrue(numpy.isclose(sharpness[4]/sharpness[8],\n numpy.sqrt(2), atol=0.1))", "def image_check(kwargs) -> bool:\n\n # Kwarg argument check\n return kwarg_check(\n kwargs=kwargs,\n options=[\n \"min_captured_at\",\n \"max_captured_at\",\n \"radius\",\n \"image_type\",\n \"organization_id\",\n \"fields\",\n ],\n callback=\"image_check\",\n )", "def simple_test(self, img, img_meta, **kwargs):\n pass", "def shazoo(tree_adj, nodes_status, edge_weight, hinge_lines, nodes_sign,\n gold_sign):\n from grid_stretch import ancestor_info\n order = list(gold_sign.keys())\n random.shuffle(order)\n allpred = {}\n node = order[0]\n nodes_sign[node] = gold_sign[node]\n nodes_status[node] = REVEALED # no need for full reveal call\n ancestors = ancestor_info(tree_adj, node)\n allpred[node] = -1\n for node in order[1:]:\n pred = predict_node_sign(tree_adj, node, nodes_status, nodes_sign,\n hinge_lines, edge_weight)\n allpred[node] = pred\n nodes_sign[node] = gold_sign[node]\n reveal_node(tree_adj, node, nodes_status, hinge_lines, ancestors)\n mistakes = sum((1 for n, p in allpred.items() if p != gold_sign[n]))\n print('mistakes: {}'.format(mistakes))", "def authorize(context, input_mock, getpass_mock, image_open_mock):\n with mock_login_requests(context):\n with mock_request(context, FileMock().security_image_gif):\n mock_func = ContextXmlMock(context).paychex_account_data\n with mock_request(context, mock_func):\n getpass_mock.return_value = context.password\n input_mock.return_value = context.security_answer\n arguments = {\n 'authorize': True,\n '--config': context.config_file,\n '<username>': context.username\n }\n pychex_cli = PychexCli(arguments)\n input_mock.assert_called_once_with(\n 'Is this your security image (Y/n)? 
')\n assert image_open_mock.call_count == 1\n if context.security_answer in ['yes', 'y', 'ye', '']:\n getpass_mock.assert_called_once_with(\n 'Password (input hidden): ')\n assert pychex_cli.username == context.username", "def test_create_image_signature(self):\n pass", "def snapshot_image_on_provider(self, builder, provider, credentials, target, template, parameters):", "def perm_escalate_helper(self, albumcontrol, request, testalbum, id, user, func, level):\n # assign anonymous user to requests\n request.user = user\n\n albumcontrol.set_accesstype(testalbum, ALBUM_PUBLIC)\n\n if level >= ALBUM_PUBLIC:\n response = func(request, id)\n self.assertEqual(response.status_code, 200)\n else:\n self.assertRaises(PermissionException, func, request, id)\n\n albumcontrol.set_accesstype(testalbum, ALBUM_ALLFRIENDS)\n\n if level >= ALBUM_ALLFRIENDS:\n response = func(request, id)\n self.assertEqual(response.status_code, 200)\n else:\n self.assertRaises(PermissionException, func, request, id)\n\n albumcontrol.set_accesstype(testalbum, ALBUM_GROUPS)\n\n if level >= ALBUM_GROUPS:\n response = func(request, id)\n self.assertEqual(response.status_code, 200)\n else:\n self.assertRaises(PermissionException, func, request, id)\n\n albumcontrol.set_accesstype(testalbum, ALBUM_PRIVATE)\n\n if level >= ALBUM_PRIVATE:\n response = func(request, id)\n self.assertEqual(response.status_code, 200)\n else:\n self.assertRaises(PermissionException, func, request, id)", "def builder_will_create_target_image(self, builder, target, image_id, template, parameters):", "def test_no_overdraw_skirmish(self):\n with self.assertRaises(db.InsufficientException):\n self.battle.create_skirmish(self.alice, 9999999)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 0)", "def test_api_thumbnail_retrieve_by_consumer_site_any_role(self):\n consumer_site_access = ConsumerSiteAccessFactory(\n consumer_site=self.some_video.playlist.consumer_site,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n consumer_site_access.user, self.some_thumbnail\n )", "def get_image_vulnerabilities(self, **kwargs) -> ImageVulnerabilitiesReport:\n ...", "def paintShoes(self):\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(PATH_EDITOR_IMG, self.avatarConfiguration[\"gender\"], self.avatarConfiguration[\"bodySize\"], \"shoes\", self.avatarConfiguration[\"shoes\"] + IMG_EXTENSION))\n self.newAvatarImage(imgPath, \"shoes\")", "def test_run_resize_and_crop(self):\n self.expect_datatore_lookup('SomeBlobKey', True)\n self.expect_open_image('SomeBlobKey', (1600, 1200))\n self.expect_crop(left_x=0.125, right_x=0.875)\n self.expect_resize(32)\n self.expect_encode_image('SomeImageSize32')\n self.mox.ReplayAll()\n self._environ['PATH_INFO'] += '=s32-c'\n self.run_request('image/jpeg', 'SomeImageSize32')", "def create_and_submit(self, username):\r\n user = UserFactory.create()\r\n attempt = SoftwareSecurePhotoVerification(user=user)\r\n user.profile.name = username\r\n attempt.upload_face_image(\"Fake Data\")\r\n attempt.upload_photo_id_image(\"More Fake Data\")\r\n attempt.mark_ready()\r\n attempt.submit()\r\n return attempt", "def upload_test_result_to_skia_gold(args, gold_session_manager, gold_session, gold_properties,\n screenshot_dir, trace, artifacts):\n\n use_luci = not (gold_properties.local_pixel_tests or gold_properties.no_luci_auth)\n\n # Determine if this trace is using a keyframe\n image_name = trace\n keyframe = get_trace_key_frame(trace)\n 
if keyframe != '':\n image_name = trace + '_frame' + keyframe\n logging.debug('Using %s as image_name for upload' % image_name)\n\n # Note: this would be better done by iterating the screenshot directory.\n prefix = SWIFTSHADER_SCREENSHOT_PREFIX if args.swiftshader else DEFAULT_SCREENSHOT_PREFIX\n png_file_name = os.path.join(screenshot_dir, prefix + image_name + '.png')\n\n if not os.path.isfile(png_file_name):\n raise Exception('Screenshot not found: ' + png_file_name)\n\n if args.use_permissive_pixel_comparison:\n # These arguments cause Gold to use the sample area inexact matching\n # algorithm. It is set to allow any of up to 3 pixels in each 4x4 group\n # of pixels to differ by any amount. Pixels that differ by a max of 1\n # on all channels (i.e. have differences that can be attributed to\n # rounding errors) do not count towards this limit.\n #\n # An image that passes due to this logic is auto-approved as a new good\n # image.\n inexact_matching_args = [\n '--add-test-optional-key',\n 'image_matching_algorithm:sample_area',\n '--add-test-optional-key',\n 'sample_area_width:4',\n '--add-test-optional-key',\n 'sample_area_max_different_pixels_per_area:3',\n '--add-test-optional-key',\n 'sample_area_channel_delta_threshold:1',\n ]\n else:\n # These arguments cause Gold to use the fuzzy inexact matching\n # algorithm. It is set to allow up to 20k pixels to differ by 1 on all\n # channels, which is meant to help reduce triage overhead caused by new\n # images from rounding differences.\n #\n # The max number of pixels is fairly arbitrary, but the diff threshold\n # is intentional since we don't want to let in any changes that can't be\n # attributed to rounding errors.\n #\n # An image that passes due to this logic is auto-approved as a new good\n # image.\n inexact_matching_args = [\n '--add-test-optional-key',\n 'image_matching_algorithm:fuzzy',\n '--add-test-optional-key',\n 'fuzzy_max_different_pixels:20000',\n '--add-test-optional-key',\n 'fuzzy_pixel_per_channel_delta_threshold:1',\n ]\n\n status, error = gold_session.RunComparison(\n name=image_name,\n png_file=png_file_name,\n use_luci=use_luci,\n inexact_matching_args=inexact_matching_args)\n\n artifact_name = os.path.basename(png_file_name)\n artifacts[artifact_name] = [artifact_name]\n\n if not status:\n return PASS\n\n status_codes = gold_session_manager.GetSessionClass().StatusCodes\n if status == status_codes.AUTH_FAILURE:\n logging.error('Gold authentication failed with output %s', error)\n elif status == status_codes.INIT_FAILURE:\n logging.error('Gold initialization failed with output %s', error)\n elif status == status_codes.COMPARISON_FAILURE_REMOTE:\n _, triage_link = gold_session.GetTriageLinks(image_name)\n if not triage_link:\n logging.error('Failed to get triage link for %s, raw output: %s', image_name, error)\n logging.error('Reason for no triage link: %s',\n gold_session.GetTriageLinkOmissionReason(image_name))\n if gold_properties.IsTryjobRun():\n # Pick \"show all results\" so we can see the tryjob images by default.\n triage_link += '&master=true'\n artifacts['triage_link_for_entire_cl'] = [triage_link]\n else:\n artifacts['gold_triage_link'] = [triage_link]\n elif status == status_codes.COMPARISON_FAILURE_LOCAL:\n logging.error('Local comparison failed. 
Local diff files:')\n output_diff_local_files(gold_session, image_name)\n elif status == status_codes.LOCAL_DIFF_FAILURE:\n logging.error(\n 'Local comparison failed and an error occurred during diff '\n 'generation: %s', error)\n # There might be some files, so try outputting them.\n logging.error('Local diff files:')\n output_diff_local_files(gold_session, image_name)\n else:\n logging.error('Given unhandled SkiaGoldSession StatusCode %s with error %s', status, error)\n\n return FAIL", "def test_transform_image_resize_and_crop_portrait(self):\n self.expect_open_image('SomeBlobKey', (148, 215))\n self.expect_crop(top_y=0.0, bottom_y=0.68837209302325575)\n self.expect_resize(32)\n self.expect_encode_image('SomeImageSize32-c')\n self.mox.ReplayAll()\n self.assertEquals(('SomeImageSize32-c', 'image/jpeg'),\n self.app._transform_image('SomeBlobKey', 's32-c'))\n self.mox.VerifyAll()", "def quick_instance(self, name, image, instance_type, env_tag='dev', zone_tag='starwatts', os_tag='debian', sg_id=None,\n private=True, extra_sg_ids=None, extra_tags=None, terminate_on_shutdown=False,\n debug=False):\n # Debug setting\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n\n # Preliminary tests\n try:\n ami = self.get_image(image_id=image)\n except EC2ResponseError:\n logging.error(\"The image {} could not be found. Aborting.\".format(image))\n return\n print(\"Using AMI {} : {}\".format(image, ami.name))\n if len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'running'})) > 0 or \\\n len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'stopped'})) > 0:\n logging.error(\"An instance with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No instance has the same 'name' tag.\")\n if self.keypair_exists(name):\n logging.error(\"A keypair with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No keypair was found with the same name.\")\n if sg_id is None:\n if self.security_group_exists(name=name):\n logging.error(\"A security group with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No security group was found with the same name.\")\n\n # Tags generation\n logging.debug(\"Generating tags to apply.\")\n tags = dict(name=name, os=os_tag, env=env_tag, zone=zone_tag, privacy='true' if private else 'false')\n if extra_tags is not None:\n tags.update(extra_tags)\n print(\"Tags : {}\".format(tags))\n\n # Fetching needed security groups (bastion and zabbix)\n standard_sg = self.get_all_security_groups(groupnames=['standard'])\n if len(standard_sg) != 1:\n logging.error(\"Multiple or no security group was found for the 'bastion' search. 
Aborting.\")\n return\n standard_sg = standard_sg[0]\n logging.debug(\"The following security group was found for 'standard : {} {}\".format(standard_sg.id,\n standard_sg.description))\n\n # Security group creation\n if sg_id is None:\n sg = self.create_security_group(name, \"SG applied to {} VM\".format(name))\n sg_id = sg.id\n\n sg_ids = [sg_id, standard_sg.id, ]\n # Using the extra security groups if any\n if extra_sg_ids is not None:\n logging.debug(\"Extra security groups to add : {}\".format(extra_sg_ids))\n sg_ids.extend(extra_sg_ids)\n logging.debug(\"Security Groups : {}\".format(sg_ids))\n\n user_data = \"-----BEGIN OUTSCALE SECTION-----\\nprivate_only=true\\n-----END OUTSCALE SECTION-----\" if private else \"\"\n logging.debug(\"Creating keypair.\")\n kp = self.create_key_pair(key_name=name)\n fp = os.path.join(os.path.expanduser('~/.ssh'), '%s.pem' % kp.name)\n with open(fp, 'wb') as fd:\n fd.write(bytes(kp.material, \"UTF-8\"))\n logging.debug(\"Keypair written to ~/.ssh/{}.pem\".format(name))\n\n resa = self.run_instances(image_id=image, key_name=name, security_groups=sg_ids, instance_type=instance_type,\n user_data=user_data,\n instance_initiated_shutdown_behavior='terminate' if terminate_on_shutdown else 'stop')\n inst = resa.instances[0]\n logging.debug(\"Adding tags to the newly created machine.\")\n inst.add_tags(tags)\n return inst", "def test_api_thumbnail_retrieve_by_random_user(self):\n user = UserFactory()\n\n self.assert_user_cannot_retrieve_thumbnail(user, self.some_thumbnail)", "def test_with_shots_option(self):\n params, target = self._generate_params_target([1])\n sampler = Sampler()\n result = sampler.run(\n circuits=[self._pqc], parameter_values=params, shots=1024, seed=15\n ).result()\n self._compare_probs(result.quasi_dists, target)", "def greasePencilCtx(*args, autoCreateFrames: bool=True, canDraw: bool=True, createOrEditFrame:\n Union[int, bool]=0, exists: bool=True, exportArchive: List[AnyStr,\n AnyStr]=None, fileTextureSize: Union[int, bool]=0, greasePencilType:\n Union[int, bool]=0, image1: Union[AnyStr, bool]=\"\", image2: Union[AnyStr,\n bool]=\"\", image3: Union[AnyStr, bool]=\"\", importArchive: AnyStr=\"\",\n makeStroke: Union[int, List[int], bool]=0, removeFrame: int=0,\n resetBrushes: bool=True, rgbcolor: Union[List[float, float, float],\n bool]=None, sequenceNodeName: Union[AnyStr, bool]=\"\", q=True, query=True,\n e=True, edit=True, **kwargs)->Union[None, Any]:\n pass", "def create_image_allowed(self):\n return self._create_image_allowed", "def test_create_activity_as_context_check_ownership(self):\n from .mockers import user_status_as_context\n from .mockers import create_context\n from hashlib import sha1\n self.create_context(create_context)\n url_hash = sha1(create_context['url']).hexdigest()\n res = self.testapp.post('/contexts/%s/activities' % url_hash, json.dumps(user_status_as_context), oauth2Header(test_manager), status=201)\n self.assertEqual(res.json['actor']['hash'], url_hash)\n self.assertEqual(res.json['creator'], test_manager)\n self.assertEqual(res.json['owner'], test_manager)", "def get_instance_image(img, bbox, size_z, size_x, context_amount, img_mean=None):\n cx, cy, w, h = xyxy2cxcywh(bbox)\n wc_z = w + context_amount * (w+h)\n hc_z = h + context_amount * (w+h)\n s_z = np.sqrt(wc_z * hc_z)\n scale_z = size_z / s_z\n d_search = (size_x - size_z) / 2\n pad = d_search / scale_z\n s_x = s_z + 2 * pad\n scale_x = size_x / s_x\n instance_img = crop_and_pad(img, cx, cy, size_x, s_x, img_mean)\n return instance_img, scale_x, s_x", 
"def test_filter_public_permissions(self):\n data = {\"public\": \"view\"}\n check_public_permissions(data)\n\n data = {\"public\": \"edit\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"share\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"owner\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)", "def test_anon_public_owned(self):\n self.do_visible(True, 'pattieblack', True)", "async def sleepy(self,ctx,user: discord.Member=None):\n if user == None or user.id == ctx.author.id:\n await ctx.send(\"{}\".format(ctx.author.mention))\n else:\n await ctx.send(\"{} {}\".format(ctx.author.mention, user.mention))\n img = random.choice(self.getreaction(\"sleepy\", \"0\"))\n embed = discord.Embed(colour=ctx.guild.me.top_role.colour)\n embed.set_image(url=img)\n await ctx.send(embed=embed)", "def test_aws_service_api_private_image_get(self):\n pass", "def test_share_inactive_user(self):\n george = self.george\n alva = self.alva\n john = self.john\n bikes = self.bikes\n\n self.assertEqual(\n bikes.raccess.get_effective_privilege(alva),\n PrivilegeCodes.NONE)\n\n # inactive users can't be granted access\n # set john to an inactive user\n john.is_active = False\n john.save()\n\n with self.assertRaises(PermissionDenied):\n george.uaccess.share_resource_with_user(\n bikes, john, PrivilegeCodes.CHANGE)\n\n john.is_active = True\n john.save()\n\n # inactive grantor can't grant access\n # let's first grant John access privilege\n george.uaccess.share_resource_with_user(\n bikes, john, PrivilegeCodes.CHANGE)\n\n self.assertEqual(\n bikes.raccess.get_effective_privilege(john),\n PrivilegeCodes.CHANGE)\n\n john.is_active = False\n john.save()\n\n with self.assertRaises(PermissionDenied):\n john.uaccess.share_resource_with_user(\n bikes, alva, PrivilegeCodes.VIEW)", "def servicenow_sspm_public_access_to_favorites_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str):\n iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat()\n\n # Name of the property to evaluate against\n evalTarget = \"glide.ui.magellan.favorites.allow_public\"\n # Get cached props\n sysPropCache = get_servicenow_sys_properties(cache)\n\n # There should not ever be a duplicate system property, use next() and a list comprehension to check if the\n # property we're evaluating is in the list of properties we get from the cache. If it is NOT then set the\n # value as `False` and we can fill in fake values. 
Not having a property for security hardening is the same\n # as a failed finding with a lot less fan fair\n propFinder = next((sysprop for sysprop in sysPropCache if sysprop[\"name\"] == evalTarget), False)\n # If we cannot find the property set \"NOT_CONFIGURED\" which will fail whatever the value should be\n if propFinder == False:\n propertyValue = \"NOT_CONFIGURED\"\n propDescription = \"\"\n propId = \"\"\n propCreatedOn = \"\"\n propCreatedBy = \"\"\n propUpdatedOn = \"\"\n propUpdatedBy = \"\"\n propScope = \"\"\n assetB64 = None\n else:\n propertyValue = str(propFinder[\"value\"])\n propDescription = str(propFinder[\"description\"]).replace(\"\\n \", \"\")\n propId = str(propFinder[\"sys_id\"])\n propCreatedOn = str(propFinder[\"sys_created_on\"])\n propCreatedBy = str(propFinder[\"sys_created_by\"])\n propUpdatedOn = str(propFinder[\"sys_updated_on\"])\n propUpdatedBy = str(propFinder[\"sys_updated_by\"])\n propScope = str(propFinder[\"sys_scope\"][\"value\"])\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(propFinder,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson) \n # NOTE: This is where the check evaluation happens - in SNOW these may be Bools or Numbers but will come back as Strings\n # always evaluate a failing condition first which should be the OPPOSITE of the SNOW reccomendation as sometimes the values\n # are not a simple Boolean expression\n if propertyValue != \"false\":\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"MEDIUM\"},\n \"Confidence\": 99,\n \"Title\": \"[SSPM.Servicenow.AccessControl.17] Instance should restrict public access to Favorites in the navigator\",\n \"Description\": f\"Servicenow instance {SNOW_INSTANCE_NAME} does not restrict public access to Favorites in the navigator. Use the 'glide.ui.magellan.favorites.allow_public' to specify whether unauthenticated users are allowed to see Favorites in the navigator. Public Access to Favorites will be compliant if 'glide.ui.magellan.favorites.allow_public' is set to false. If this property is not enabled, there is a risk of unauthorized access to sensitive data. 
Refer to the remediation instructions if this configuration is not intended.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information refer to the Public access to favorites section of the Servicenow Product Documentation.\",\n \"Url\": \"https://docs.servicenow.com/bundle/utah-platform-security/page/administer/security/reference/public-access-favorites.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"ServiceNow\",\n \"ProviderType\": \"SaaS\",\n \"ProviderAccountId\": SNOW_INSTANCE_NAME,\n \"AssetRegion\": SNOW_INSTANCE_REGION,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Management & Governance\",\n \"AssetService\": \"System Properties\",\n \"AssetComponent\": \"System Property\"\n },\n \"Resources\": [\n {\n \"Type\": \"ServicenowInstance\",\n \"Id\": f\"{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}\",\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"ServicenowInstance\": SNOW_INSTANCE_NAME,\n \"SysId\": propId,\n \"PropertyName\": evalTarget,\n \"PropertyValue\": propertyValue,\n \"Description\": propDescription,\n \"CreatedBy\": propCreatedBy,\n \"CreatedOn\": propCreatedOn,\n \"UpdatedBy\": propUpdatedBy,\n \"UpdatedOn\": propUpdatedOn,\n \"Scope\": propScope\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.PT-3\",\n \"NIST SP 800-53 Rev. 4 AC-3\",\n \"NIST SP 800-53 Rev. 4 CM-7\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.6.2.2\", \n \"ISO 27001:2013 A.9.1.2\",\n \"ISO 27001:2013 A.9.4.1\",\n \"ISO 27001:2013 A.9.4.4\",\n \"ISO 27001:2013 A.9.4.5\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\",\n \"ISO 27001:2013 A.18.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[SSPM.Servicenow.AccessControl.17] Instance should restrict public access to Favorites in the navigator\",\n \"Description\": f\"Servicenow instance {SNOW_INSTANCE_NAME} does restrict public access to Favorites in the navigator.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information refer to the Public access to favorites section of the Servicenow Product Documentation.\",\n \"Url\": \"https://docs.servicenow.com/bundle/utah-platform-security/page/administer/security/reference/public-access-favorites.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"ServiceNow\",\n \"ProviderType\": \"SaaS\",\n \"ProviderAccountId\": SNOW_INSTANCE_NAME,\n \"AssetRegion\": SNOW_INSTANCE_REGION,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Management & Governance\",\n \"AssetService\": \"System Properties\",\n \"AssetComponent\": \"System Property\"\n },\n \"Resources\": [\n {\n \"Type\": \"ServicenowInstance\",\n \"Id\": 
f\"{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}\",\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"ServicenowInstance\": SNOW_INSTANCE_NAME,\n \"SysId\": propId,\n \"PropertyName\": evalTarget,\n \"PropertyValue\": propertyValue,\n \"Description\": propDescription,\n \"CreatedBy\": propCreatedBy,\n \"CreatedOn\": propCreatedOn,\n \"UpdatedBy\": propUpdatedBy,\n \"UpdatedOn\": propUpdatedOn,\n \"Scope\": propScope\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.PT-3\",\n \"NIST SP 800-53 Rev. 4 AC-3\",\n \"NIST SP 800-53 Rev. 4 CM-7\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.6.2.2\", \n \"ISO 27001:2013 A.9.1.2\",\n \"ISO 27001:2013 A.9.4.1\",\n \"ISO 27001:2013 A.9.4.4\",\n \"ISO 27001:2013 A.9.4.5\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\",\n \"ISO 27001:2013 A.18.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def tofits(self, *args, **kwargs):\n return _image.image_tofits(self, *args, **kwargs)", "def __init__(self, image, x=0, y=0, itemtype='food'):\r\n ShiftableObject.__init__(self, x, y)\r\n self.type = str(itemtype)\r\n self.image = load_image(image)\r\n self.width = self.image.get_width()\r\n self.height = self.image.get_height()\r\n self.visible = True\r\n self.rect = pygame.Rect(self.x, self.y, self.width, self.height)", "def get_exemplar_image(img, bbox, size_z, context_amount, img_mean=None):\n cx, cy, w, h = xyxy2cxcywh(bbox)\n\n wc_z = w + context_amount * (w+h)#w+0.5*(w+h)\n hc_z = h + context_amount * (w+h)#h+0.5*(w+h)\n #orginal_sz\n s_z = np.sqrt(wc_z * hc_z)\n #model_sz\n scale_z = size_z / s_z\n exemplar_img = crop_and_pad(img, cx, cy, size_z, s_z, img_mean)\n return exemplar_img, scale_z, s_z", "def test_api_thumbnail_retrieve_by_organization_administrator(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=ADMINISTRATOR,\n )\n\n self.assert_user_can_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def create_and_submit(self):\r\n user = UserFactory.create()\r\n attempt = SoftwareSecurePhotoVerification(user=user)\r\n user.profile.name = u\"Rust\\u01B4\"\r\n\r\n attempt.upload_face_image(\"Just pretend this is image data\")\r\n attempt.upload_photo_id_image(\"Hey, we're a photo ID\")\r\n attempt.mark_ready()\r\n attempt.submit()\r\n\r\n return attempt", "def __call__(self, results):\n if np.random.rand() > self.prob:\n return results\n magnitude = random_negative(self.magnitude, self.random_negative_prob)\n self._shear_img(results, magnitude, self.direction, self.interpolation)\n return results", "async def shibe(self, ctx: Message):\n\t\timage_url = requests.get(\"https://shibe.online/api/shibes?count=1\").json()[0]\n\t\tawait self.send(image_url, whisper=[ctx.author.id])", "def stresstest(*args, **kwargs):\n def decorator(f):\n if 'class_setup_per' in kwargs:\n setattr(f, \"st_class_setup_per\", kwargs['class_setup_per'])\n else:\n setattr(f, \"st_class_setup_per\", 'process')\n if 'allow_inheritance' in kwargs:\n setattr(f, \"st_allow_inheritance\", kwargs['allow_inheritance'])\n else:\n setattr(f, \"st_allow_inheritance\", False)\n attr(type='stress')(f)\n return f\n return decorator", "def test_supply_ambush(self):\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 2)\n s2 = s1.react(self.alice, 2, hinder=False)\n s2.react(self.bob, 2, 
troop_type=\"cavalry\")\n\n # Alice still wins, though - the margin attack is just to stop\n # reinforcements\n result = s1.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, self.alice.team)", "def testSmallSrc(self):\n fromWcs = afwGeom.makeSkyWcs(\n crpix=lsst.geom.Point2D(0, 0),\n crval=lsst.geom.SpherePoint(359, 0, lsst.geom.degrees),\n cdMatrix=afwGeom.makeCdMatrix(scale=1.0e-8*lsst.geom.degrees),\n )\n fromExp = afwImage.ExposureF(afwImage.MaskedImageF(1, 1), fromWcs)\n\n toWcs = afwGeom.makeSkyWcs(\n crpix=lsst.geom.Point2D(0, 0),\n crval=lsst.geom.SpherePoint(358, 0, lsst.geom.degrees),\n cdMatrix=afwGeom.makeCdMatrix(scale=1.1e-8*lsst.geom.degrees),\n )\n toExp = afwImage.ExposureF(afwImage.MaskedImageF(10, 10), toWcs)\n\n warpControl = afwMath.WarpingControl(\"lanczos3\")\n # if a bug described in ticket #2441 is present, this will raise an\n # exception:\n numGoodPix = afwMath.warpExposure(toExp, fromExp, warpControl)\n self.assertEqual(numGoodPix, 0)\n self.assertTrue(np.all(np.isnan(toExp.image.array)))\n self.assertTrue(np.all(np.isinf(toExp.variance.array)))\n noDataBitMask = afwImage.Mask.getPlaneBitMask(\"NO_DATA\")\n self.assertTrue(np.all(toExp.mask.array == noDataBitMask))", "def scaleKeyCtx(*args, exists: bool=True, history: bool=True, image1: Union[AnyStr, bool]=\"\",\n image2: Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\", name: AnyStr=\"\",\n scaleSpecifiedKeys: bool=True, type: Union[AnyStr, bool]=\"\", q=True, query=True,\n e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def test_no_adds_to_overdraw_skirmish(self):\n s1 = self.battle.create_skirmish(self.alice, 99)\n with self.assertRaises(db.InsufficientException):\n s1.react(self.alice, 2, hinder=False)\n\n n = (self.sess.query(db.SkirmishAction).filter_by(parent_id=None).\n filter_by(participant=self.alice)).count()\n self.assertEqual(n, 1)", "def run(self, reuse=False):\n build = True\n if 'shub://' in self.action['uses']:\n image = self.action['uses']\n build = False\n elif './' in self.action['uses']:\n image = 'action/' + os.path.basename(self.action['uses'])\n singularityfile_path = os.path.join(\n os.getcwd(), self.action['uses'])\n else:\n image = '/'.join(self.action['uses'].split('/')[:2])\n singularityfile_path = os.path.join(self.action['repo_dir'],\n self.action['action_dir'])\n\n if not reuse:\n if self.singularity_exists():\n self.singularity_rm()\n if build:\n self.singularity_build(singularityfile_path, image)\n else:\n self.singularity_pull(image)\n else:\n if not self.singularity_exists():\n if build:\n self.singularity_build(singularityfile_path, image)\n else:\n self.singularity_pull(image)\n\n e = self.singularity_start(image)\n\n if e != 0:\n pu.fail('Action {} failed!\\n'.format(self.action['name']))", "def permits(identity, obj, permission):\n return False", "def test_transform_image_resize_and_crop_landscape(self):\n self.expect_open_image('SomeBlobKey', (1200, 1600))\n self.expect_crop(top_y=0.0, bottom_y=0.75)\n self.expect_resize(32)\n self.expect_encode_image('SomeImageSize32-c')\n self.mox.ReplayAll()\n self.assertEquals(('SomeImageSize32-c', 'image/jpeg'),\n self.app._transform_image('SomeBlobKey', 's32-c'))\n self.mox.VerifyAll()", "def share(config: Config, ami: str, account: str) -> None:\n\n ec2_client = boto3.client(\"ec2\", region_name=config.get(\"region\", None))\n\n ec2_client.modify_image_attribute(\n ImageId=ami,\n LaunchPermission={\"Add\": [{\"UserId\": account}]},\n OperationType=\"add\",\n UserIds=[account],\n 
Value=\"string\",\n DryRun=False,\n )", "async def smug(self, ctx, *, user: discord.Member):\n\n author = ctx.message.author\n smugs = nekos.img('smug')\n\n # Build Embed\n embed = discord.Embed(color=0xffc2ff)\n embed.description = f\"**{author.name} smugs at {user.name}**\"\n embed.set_footer(text=\"Made with the help of nekos.life\")\n embed.set_image(url=smugs)\n await ctx.send(embed=embed)", "def side_effect_create_mock_appimage(appimage_path):\n\n def _side_effect(*args, **kwargs):\n create_mock_appimage(appimage_path=appimage_path)\n return \"new-downloaded-file\"\n\n return _side_effect", "def _cache_image(self, instance):\n\n image_name = '%s.tar.gz' % instance['image_id']\n full_image_path = '%s/%s' % (FLAGS.ovz_image_template_dir, image_name)\n\n if not os.path.exists(full_image_path):\n # These objects are required to retrieve images from the object store.\n # This is known only to work with glance so far but as I understand it\n # glance's interface matches that of the other object stores.\n user = manager.AuthManager().get_user(instance['user_id'])\n project = manager.AuthManager().get_project(instance['project_id'])\n\n # Grab image and place it in the image cache\n images.fetch(instance['image_id'], full_image_path, user, project)\n return True\n else:\n return False", "def filters(im, detail=False, sharpen=False, **kwargs):\n filters = []\n if detail:\n filters.append(('detail', True))\n if sharpen:\n filters.append(('sharpen', True))\n return im", "def test_impact_for_exp_with_no_ratings(self):\n # Sign up a user and have them create an exploration.\n user_a_id = self._sign_up_user(\n self.USER_A_EMAIL, self.USER_A_USERNAME)\n self._create_exploration(self.EXP_ID_1, user_a_id)\n user_stats_model = user_models.UserStatsModel.get(\n user_a_id, strict=False)\n self.assertEqual(user_stats_model, None)", "def sai_testbed(\n duthost,\n request,\n ptfhost,\n start_sai_test_container,\n prepare_ptf_server):\n try: \n _setup_dut(ptfhost, request)\n yield \n finally: \n _teardown_dut(duthost, ptfhost, request)", "def test_anon_private_owned(self):\n self.do_visible(False, 'pattieblack', False)", "def make_critics(self, obs=None, action=None, reuse=False,\n scope=\"values_fn\", create_vf=True, create_qf=True):\n raise NotImplementedError", "def special(self, game, player):\n attacks = [_ for _ in player.piles[Piles.HAND] if _.isAttack()]\n if attacks:\n options = [(\"Don't reveal\", None)]\n options.extend([(f\"Reveal {_.name}\", _) for _ in attacks])\n reveal = player.plr_choose_options(\"Reveal attack to pickup a card\", *options)\n if reveal:\n player.reveal_card(reveal)\n player.pickup_card()\n # Rotate pile selection\n piles = list(game.cardpiles.keys())\n piles.sort()\n options = [(\"Don't do anything\", False)]\n for pile in piles:\n options.append((f\"Rotate {pile}\", pile))\n opt = player.plr_choose_options(\"Rotate a pile?\", *options)\n if opt:\n game[opt].rotate()", "def prepare_test_img(self, idx):\n img_info = self.img_infos[idx]\n img_path = osp.join(self.img_prefix, img_info['filename'])\n\n if self.proposals is not None:\n proposal = self.proposals[idx][:self.num_max_proposals]\n if not proposal.shape[1] == 4 or proposal.shape[1] == 5:\n raise AssertionError(\n 'proposals should have shapes (n, 4) or (n, 5), '\n 'but found {}'.format(proposal.shape))\n else:\n proposal = None\n\n if self.with_background_erasing:\n ann = self.get_ann_info(idx)\n gt_bboxes = ann['bboxes']\n else:\n gt_bboxes = None\n\n def prepare_single_scale(img_path, expected_size, flip_ratio=0,\n 
proposal=None, bbox=None):\n _img, img_shape, pad_shape, scale_factor, \\\n flipped_flag, flipped_direction = self.img_transforms(\n img_path, expected_size, flip_ratio=flip_ratio)\n if bbox is not None:\n if not len(bbox) == 0:\n _gt_bboxes = self.bbox_transforms(bbox,\n img_shape,\n scale_factor,\n flipped_flag,\n flipped_direction)\n else:\n _gt_bboxes = bbox\n _img = self.background_erasing(\n _img, img_shape, _gt_bboxes,\n cell_size=self.be_cell_size,\n random_ratio=self.be_random_ratio)\n _img = to_tensor(_img)\n _img_meta = dict(\n filename=img_info['filename'],\n ori_shape=(img_info['height'], img_info['width'], 3),\n img_shape=img_shape,\n pad_shape=pad_shape,\n scale_factor=scale_factor,\n flipped_flag=flipped_flag,\n flipped_direction=flipped_direction\n )\n if proposal is not None:\n if proposal.shape[1] == 5:\n score = proposal[:, 4, None]\n proposal = proposal[:, :4]\n else:\n score = None\n _proposal = self.bbox_transforms(proposal,\n img_shape,\n scale_factor,\n flipped_flag,\n flipped_direction)\n _proposal = np.hstack([_proposal, score]) \\\n if score is not None else _proposal\n _proposal = to_tensor(_proposal)\n else:\n _proposal = None\n return _img, _img_meta, _proposal\n\n imgs = []\n img_metas = []\n proposals = []\n for expected_size in self.img_expected_sizes:\n # at first, we do not flip the image\n _img, _img_meta, _proposal = prepare_single_scale(\n img_path, expected_size, flip_ratio=0,\n proposal=proposal, bbox=gt_bboxes)\n imgs.append(_img)\n img_metas.append(DataContainer(_img_meta, cpu_only=True))\n proposals.append(_proposal)\n if self.flip_ratio > 0:\n _img, _img_meta, _proposal = prepare_single_scale(\n img_path, expected_size, flip_ratio=1,\n proposal=proposal, bbox=gt_bboxes)\n imgs.append(_img)\n img_metas.append(DataContainer(_img_meta, cpu_only=True))\n proposals.append(_proposal)\n data = dict(img=imgs, img_meta=img_metas)\n if self.proposals is not None:\n data['proposals'] = proposals\n return data", "def image(request, img_id):\n image = Image.objects.get(pk=img_id)\n if request.user.is_staff or image.is_approved:\n comments = ImageComment.objects.filter(image_id=img_id).order_by('-submission_date')\n comments_and_votes = Vote.objects.get_weighted_scores_in_bulk(comments, request.user)\n\n ctx = {\"img\":image,\n \"comments_and_votes\":comments_and_votes,\n \"image_tags\":image.tags.all(),\n \"all_tags\":Tag.objects.all(),\n \"site\":get_current_site(request)\n }\n return render_to_response('wainz/image.html', ctx , context_instance = RequestContext(request))\n else:\n return HttpResponseRedirect(reverse('wainz.views.composite'))", "def test_do_boss_science(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['boss_science'])\n nExp = 2\n self._do_boss_science(nExp, 35, 0, 0, nExp=nExp)", "def shelved_predicate(interactions_df, obs_interactions, target_interactions, truth_interactions, fold, setting):\n \n print(\"predicate_construction: shelved_predicate:\")\n \n def write(s, p):\n print(\"predicate_construction: shelved_predicate: writing: \" + \n './goodreads/' + str(fold) + '/' + setting + '/shelved_' + p + '.txt' )\n s.to_csv('./goodreads/' + str(fold) + '/' + setting + '/shelved_' + p + '.txt',\n sep='\\t', header=False, index=True)\n\n # observed predicates\n partition = 'obs'\n shelved_series = pd.Series(data=1, index=obs_interactions, name='shelved')\n write(shelved_series, partition)\n\n # truth predicates\n partition = 'truth'\n shelved_series = pd.Series(data=1, index=truth_interactions, name='shelved')\n write(shelved_series, 
partition)\n\n # target predicates\n partition = 'targets'\n shelved_df = pd.DataFrame(index=target_interactions)\n write(shelved_df, partition)", "def cli(env, identifier, account_id):\n\n image_mgr = SoftLayer.ImageManager(env.client)\n image_id = helpers.resolve_id(image_mgr.resolve_ids, identifier, 'image')\n shared_image = image_mgr.share_image(image_id, account_id)\n\n if shared_image:\n env.fout(f\"Image template {identifier} was shared to account {account_id}.\")", "def test_aws_service_api_private_images_get(self):\n pass", "async def test(\n self,\n has_perm,\n req_perm,\n mocker,\n snapshot,\n spawn_client,\n static_time,\n no_permissions,\n fake2,\n ):\n mocker.patch(\n \"virtool.utils.generate_key\", return_value=(\"raw_key\", \"hashed_key\")\n )\n\n group = await fake2.groups.create(\n UpdatePermissionsRequest(**{Permission.create_sample: True})\n )\n\n client = await spawn_client(authorize=True)\n\n if has_perm:\n await client.db.users.update_one(\n {\"_id\": \"test\"},\n {\"$set\": {\"groups\": [group.id]}},\n )\n\n body = {\"name\": \"Foobar\"}\n\n if req_perm:\n body[\"permissions\"] = {Permission.create_sample.value: True}\n\n resp = await client.post(\"/account/keys\", body)\n\n assert resp.status == 201\n assert await resp.json() == snapshot\n assert await client.db.keys.find_one() == snapshot", "def sample_damaging(image):\r\n return crease_image(blotch_image(image, 100, True), 10, False)", "def test_negative_silhouette_score(self):\n raise NotImplementedError(\"no negative silhouette example available\")", "async def teehee(self,ctx,user: discord.Member=None):\n if user == None or user.id == ctx.author.id:\n await ctx.send(\"{}\".format(ctx.author.mention))\n else:\n await ctx.send(\"{} {}\".format(ctx.author.mention, user.mention))\n img = random.choice(self.getreaction(\"teehee\", \"0\"))\n embed = discord.Embed(colour=ctx.guild.me.top_role.colour)\n embed.set_image(url=img)\n await ctx.send(embed=embed)", "def Sharpness(img: Image, magnitude: float) -> Image:\n return PIL.ImageEnhance.Sharpness(img).enhance(\n 1 + magnitude * random.choice([-1, 1])\n )", "def test_profile_image_request_for_null_endorsed_by(self):\n self.register_get_user_response(self.user)\n thread = self.make_minimal_cs_thread({\n \"thread_type\": \"question\",\n \"endorsed_responses\": [make_minimal_cs_comment({\n \"id\": \"endorsed_comment\",\n \"user_id\": self.user.id,\n \"username\": self.user.username,\n \"endorsed\": True,\n })],\n \"non_endorsed_resp_total\": 0,\n })\n self.register_get_thread_response(thread)\n self.create_profile_image(self.user, get_profile_image_storage())\n\n response = self.client.get(self.url, {\n \"thread_id\": thread[\"id\"],\n \"endorsed\": True,\n \"requested_fields\": \"profile_image\",\n })\n assert response.status_code == 200\n response_comments = json.loads(response.content.decode('utf-8'))['results']\n for response_comment in response_comments:\n expected_author_profile_data = self.get_expected_user_profile(response_comment['author'])\n response_users = response_comment['users']\n assert expected_author_profile_data == response_users[response_comment['author']]\n assert response_comment['endorsed_by'] not in response_users", "def ShearX(img: Image, magnitude: float) -> Image:\n return img.transform(\n img.size,\n PIL.Image.AFFINE,\n (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),\n PIL.Image.BICUBIC,\n fillcolor=FILLCOLOR,\n )", "def SIFT_create(nfeatures=None, nOctaveLayers=None, contrastThreshold=None, edgeThreshold=None, sigma=None): # real 
signature unknown; restored from __doc__\n pass", "def wrinkleContext(*args, branchCount: Union[int, bool]=2, branchDepth: Union[int, bool]=0,\n exists: bool=True, history: bool=True, image1: Union[AnyStr, bool]=\"\",\n image2: Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\", name:\n AnyStr=\"\", randomness: Union[float, bool]=0.0, style: Union[AnyStr,\n bool]=\"radial\", thickness: Union[float, bool]=0.0, wrinkleCount: Union[int,\n bool]=3, wrinkleIntensity: Union[float, bool]=0.0, q=True, query=True,\n e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def test_run_resize_and_crop_png(self):\n self.expect_datatore_lookup('SomeBlobKey', True)\n self.expect_open_image('SomeBlobKey', (1600, 1200), mime_type='PNG')\n self.expect_crop(left_x=0.125, right_x=0.875)\n self.expect_resize(32)\n self.expect_encode_image('SomeImageSize32',\n images_service_pb.OutputSettings.PNG)\n self.mox.ReplayAll()\n self._environ['PATH_INFO'] += '=s32-c'\n self.run_request('image/png', 'SomeImageSize32')", "def forward_test(self, img, img_metas, **kwargs):", "def create_for_data_processing(\n cls, \n env_name, \n camera_names, \n camera_height, \n camera_width, \n reward_shaping, \n **kwargs,\n ):\n is_v1 = (robosuite.__version__.split(\".\")[0] == \"1\")\n has_camera = (len(camera_names) > 0)\n\n new_kwargs = {\n \"reward_shaping\": reward_shaping,\n }\n\n if has_camera:\n if is_v1:\n new_kwargs[\"camera_names\"] = list(camera_names)\n new_kwargs[\"camera_heights\"] = camera_height\n new_kwargs[\"camera_widths\"] = camera_width\n else:\n assert len(camera_names) == 1\n if has_camera:\n new_kwargs[\"camera_name\"] = camera_names[0]\n new_kwargs[\"camera_height\"] = camera_height\n new_kwargs[\"camera_width\"] = camera_width\n\n kwargs.update(new_kwargs)\n\n # also initialize obs utils so it knows which modalities are image modalities\n image_modalities = list(camera_names)\n if is_v1:\n image_modalities = [\"{}_image\".format(cn) for cn in camera_names]\n elif has_camera:\n # v0.3 only had support for one image, and it was named \"image\"\n assert len(image_modalities) == 1\n image_modalities = [\"image\"]\n obs_modality_specs = {\n \"obs\": {\n \"low_dim\": [], # technically unused, so we don't have to specify all of them\n \"image\": image_modalities,\n }\n }\n ObsUtils.initialize_obs_utils_with_obs_specs(obs_modality_specs)\n\n # note that @postprocess_visual_obs is False since this env's images will be written to a dataset\n return cls(\n env_name=env_name,\n render=False, \n render_offscreen=has_camera, \n use_image_obs=has_camera, \n postprocess_visual_obs=False,\n **kwargs,\n )", "def get_context_data(self, **kwargs):\n try:\n user = self.request.user\n if user != self.character.player_ob and not (\n user.is_staff or user.check_permstring(\"builders\")\n ):\n raise PermissionDenied\n except AttributeError:\n raise PermissionDenied\n return super(FlashbackCreateView, self).get_context_data(**kwargs)" ]
[ "0.54896724", "0.54389876", "0.52578163", "0.5243541", "0.5127175", "0.50320935", "0.49899673", "0.49097702", "0.4775629", "0.47158346", "0.47047496", "0.4655331", "0.4621686", "0.46000612", "0.45601118", "0.45558378", "0.45160365", "0.4514471", "0.45083925", "0.45038795", "0.44405332", "0.44366503", "0.43388307", "0.43358678", "0.43294334", "0.43245113", "0.4307198", "0.43041328", "0.42939383", "0.42830762", "0.42802048", "0.4278859", "0.4275645", "0.42706853", "0.42359623", "0.4235265", "0.4233231", "0.4233024", "0.42258507", "0.42179936", "0.42118526", "0.42092755", "0.4205289", "0.42038357", "0.42034587", "0.41982883", "0.4191532", "0.4186312", "0.4144778", "0.4143843", "0.41399902", "0.4139024", "0.41374585", "0.4131948", "0.41247272", "0.4117805", "0.41122863", "0.41101792", "0.41095632", "0.4103727", "0.41016898", "0.40981668", "0.40940398", "0.40782794", "0.40782523", "0.40759182", "0.4074375", "0.40699798", "0.40696698", "0.4065963", "0.4064027", "0.40612417", "0.40611637", "0.4048987", "0.40425128", "0.40182704", "0.40130284", "0.40080643", "0.4007703", "0.40065157", "0.39963368", "0.39930958", "0.39922166", "0.39909276", "0.3984536", "0.39843947", "0.3982666", "0.39821324", "0.3981697", "0.3980835", "0.39747348", "0.39734644", "0.39704108", "0.3970161", "0.39700457", "0.3965836", "0.39657855", "0.39627892", "0.39615053", "0.39595547" ]
0.8215476
0
Tests that an empty context (with is_admin set to True) can access an image with is_public set to True.
def test_empty_public(self): self.do_visible(True, None, True, is_admin=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_visible(self, exp_res, img_owner, img_public, **kwargs):\n\n img = FakeImage(img_owner, img_public)\n ctx = context.RequestContext(**kwargs)\n\n self.assertEqual(ctx.is_image_visible(img), exp_res)", "def test_aws_service_api_private_images_get(self):\n pass", "def test_aws_service_api_private_image_get(self):\n pass", "def test_admin_public(self):\n with self.login(self.user_admin):\n self.assertTrue(current_user.is_authenticated)\n self.assertEqual(current_user, self.user_admin)\n\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def test_aws_service_api_public_image_get(self):\n pass", "def test_aws_service_api_public_images_get(self):\n pass", "def test_no_images_unauthorized(self):\n res = self.client.get(IMAGE_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_empty_private(self):\n self.do_visible(True, None, False, is_admin=True)", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_empty_public_owned(self):\n self.do_visible(True, 'pattieblack', True, is_admin=True)", "def test_auth_public(self):\n self.do_visible(True, None, True, tenant='froggy')", "def test_filter_public_permissions(self):\n data = {\"public\": \"view\"}\n check_public_permissions(data)\n\n data = {\"public\": \"edit\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"share\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"owner\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)", "def test_anonymous_public(self):\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def test_public_user(self):\n set_permission(Permission.SHARE, self.user1, self.collection)\n\n data = {\"public\": \"view\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"none\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"edit\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"share\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"owner\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def 
test_document_not_publicly_accessible(self):\n document = DocumentFactory(\n id=\"301b5f4f-b9f1-4a5f-897d-f8f1bf22c396\",\n playlist__title=\"playlist-003\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n is_public=False,\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n uploaded_on=\"2019-09-24 07:24:40+00\",\n )\n\n response = self.client.get(f\"/documents/{document.pk}\")\n\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"<html>\")\n content = response.content.decode(\"utf-8\")\n\n match = re.search(\n '<div id=\"marsha-frontend-data\" data-context=\"(.*)\">', content\n )\n\n context = json.loads(unescape(match.group(1)))\n\n self.assertIsNone(context.get(\"resource\"))\n self.assertEqual(context.get(\"state\"), \"error\")\n self.assertEqual(context.get(\"modelName\"), \"documents\")", "def test_auth_private(self):\n self.do_visible(True, None, False, tenant='froggy')", "def test_image_no_login(self):\n\n result = self.client.get(\"/select_image\", follow_redirects=True)\n\n self.assertIn(b\"Password\", result.data)", "def test_should_render_for_owner_unpublished(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context(\n public=False)))", "def test_image_display(self):\n\n result = self.client.get(\"/select_image\")\n\n self.assertIn(b\"/static/uploads/girl-glowing-skin-blue-eyes.jpg\", result.data)", "def test_auth_public_owned(self):\n self.do_visible(True, 'pattieblack', True, tenant='pattieblack')", "def media_image_remotely_accessible(self) -> bool:\n return True", "def test_empty_private_owned(self):\n self.do_visible(True, 'pattieblack', False, is_admin=True)", "def test_show_host_not_exist(self):\n self.req.environ['cinder.context'].is_admin = True\n dest = 'dummydest'\n self.assertRaises(webob.exc.HTTPNotFound,\n self.controller.show,\n self.req, dest)", "def test_admin_accessible(self) -> None:\n response = self.client.get(\"/admin/\")\n self.assertEqual(200, response.status_code)", "def test_api_thumbnail_retrieve_by_organization_administrator(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=ADMINISTRATOR,\n )\n\n self.assert_user_can_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def test_permissions(self):\n taxonomy = self.get_taxonomy()\n return True if self.request.user == taxonomy.author else taxonomy.public", "def test_api_thumbnail_administrator_read_detail_in_read_only(self):\n thumbnail = ThumbnailFactory()\n\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=thumbnail.video.playlist,\n permissions__can_update=False,\n )\n\n response = self.client.get(\n self._get_url(thumbnail.video, thumbnail),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates 
= template_api.get_all_accessible_by_hash(\n self.fixture.global_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in templates))", "def test_should_render_with_unpublished(self) -> None:\n self.assertFalse(self.action.should_render(\n context=self._create_request_context(public=False)))", "def test_un_logged_in_user_can_not_upload_picture(self):\n tmp_file = generate_image_for_testing()\n response = self.client.post(self.user_passport_url,\n data={'passport': tmp_file})\n\n self.assertEqual(403, response.status_code)", "def test_auth_private_owned(self):\n self.do_visible(True, 'pattieblack', False, tenant='pattieblack')", "def testimage_handler(self):\n\t\t\n\t\tthings = Thing.objects.all()\n\t\tif len( things ):\n\t\t\tthing = things[0]\n\t\telse:\n\t\t\tc = Client()\n\t\t\tdata = parse_qs( 'title=&tags=&lattitude=32.82248&longitude=-96.762986&duration=&parent=&privacy=U&lifespan=&format=txt' )\n\t\t\tdata[ 'media' ] = open( MEDIA_ROOT + 'unittest_image.jpg' )\n\t\t\tc.post( '/api/place/', data )\n\t\t\t\n\t\t\tthing = Thing.objects.all()[0]\n\n\t\t\n\t\turi = thing.media.replace( 'http://' + DOMAIN, '' )\n\t\t\n\t\tc = Client()\n\t\tresponse = c.get( uri )\n\t\tself.failUnlessEqual(response.status_code, 200)", "def test_get_owner_image(self):\n\n # user1 is owner of image_id 1\n # user2 is owner of image ids (2,3)\n for image_id in range(1, 4):\n url = reverse(self.url_name_one, args=(image_id,))\n if image_id == 1:\n self.client.force_authenticate(self.user1)\n else:\n self.client.force_authenticate(self.user2)\n\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n if image_id == 1:\n self.assertEqual(response.data[\"owner\"], \"user1\")\n else:\n self.assertEqual(response.data[\"owner\"], \"user2\")\n\n # user2 try to get image_id 1 which is owner user1\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_public(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_public\": True, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"public\"})", "def test_anon_public(self):\n self.do_visible(True, None, True)", "def test_photo_classification_view_set_get_public(self):\n # Test data\n user = account_models.User.objects.get(email='mrtest@mypapaya.io')\n\n photo_models.PhotoClassification.objects.create_or_update(name='City', public=False)\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n request = client.get('/api/photo_classifications')\n results = request.data['results']\n\n self.assertEquals(len(results), 13)", "def test_public_status_page_remove_authorization_from_public_status_page(self):\n pass", "def test_04_admin_featured_apps_as_anonymous(self):\r\n res = self.app.get('/admin/featured', follow_redirects=True)\r\n assert \"Please sign in to access this page\" in res.data, res.data", "def test_get_reusableitem_api_not_public(self):\n\n # user not logged in\n self.client.logout()\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n # user logged in and created the Reusable Item\n self.client.force_authenticate(user=self.user_1)\n\n 
response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # user logged in and did not create the Reusable Item\n self.client.logout()\n self.client.force_authenticate(user=self.user_2)\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_public_status_page_add_authorization_to_public_status_page(self):\n pass", "def test_get_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )", "def test_anything_else_is_accessible(api_client):\n\n assert api_client().get(\"/anything/else\").status_code == 200", "def test_auth_required(self, api_client):\n res = api_client.get(PHOTO_URL)\n\n assert res.status_code == status.HTTP_401_UNAUTHORIZED", "def test_cant_create_image_unauthorized(self):\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (10, 10))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n res = self.client.post(IMAGE_URL, data={'image': ntf},\n format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_default_publish(self):\n self.assertIs(self.photo.published, 'Public')", "def test_aws_service_api_image_get(self):\n pass", "def test_private(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_public\": False, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"private\"})", "def test_logged_in_owner(self):\n self.make_logged_in_owner()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u, album.display_photo, ALBUM_PRIVATE)", "def test_an_admin_view_anonymous(client):\n response = client.get('/admin/')\n assert status(response) == 'found'\n assert response.url.startswith('/admin/login/')", "def test_get_all_accessible_by_id_list_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_id_list(\n self.template_id_list, request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in templates))", "def test_list_image(self):\n pass", "def test_anon_public_owned(self):\n self.do_visible(True, 'pattieblack', True)", "def test_get_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_logged_in_not_friend(self):\n\n # log in\n self.make_logged_in_not_friend()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n 
self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PUBLIC)", "def test_api_thumbnail_retrieve_by_organization_student(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=STUDENT,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_has_role_public(self):\n self.project.set_public()\n self.assertFalse(self.category.has_role(self.user_bob))\n self.assertTrue(self.project.has_role(self.user_bob))", "def test_public(client, url):\n response = client.get(url, secure=True)\n assert response.status_code == 200", "def test_only_edit_perm(self):\n self.assertStatusCode(self.url, 403)", "def test_get_global_template_as_anonymous_with_access_right_returns_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n template = template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.global_template)", "def test_get_all_accessible_by_hash_list_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.user1_template.hash], request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_checkread(self):\n user1 = {'uid': 1, 'gid': 1}\n self.assertTrue(self.m._checkread(user1, {}))\n mock_image = {\n 'userACL': None,\n 'groupACL': None\n }\n # Test a public image with ACLs set to None\n self.assertTrue(self.m._checkread(user1, mock_image))\n # Now empty list instead of None. 
Treat it the same way.\n mock_image['userACL'] = []\n mock_image['groupACL'] = []\n self.assertTrue(self.m._checkread(user1, mock_image))\n self.assertTrue(self.m._checkread(user1, {'private': False}))\n # Private false should trump other things\n self.assertTrue(self.m._checkread(user1,\n {'private': False, 'userACL': [2]}))\n self.assertTrue(self.m._checkread(user1,\n {'private': False, 'groupACL': [2]}))\n # Now check a protected image that the user should\n # have access to\n mock_image['userACL'] = [1]\n self.assertTrue(self.m._checkread(user1, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 2, 'gid': 1}, mock_image))\n # Now check by groupACL\n mock_image['groupACL'] = [1]\n self.assertTrue(self.m._checkread({'uid': 3, 'gid': 1}, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 3, 'gid': 2}, mock_image))\n # What about an image with a list\n mock_image = {\n 'userACL': [1, 2, 3],\n 'groupACL': [4, 5, 6]\n }\n self.assertTrue(self.m._checkread(user1, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 7, 'gid': 7}, mock_image))", "def test_image_import(self):\r\n module_store = modulestore('direct')\r\n\r\n content_store = contentstore()\r\n\r\n # Use conditional_and_poll, as it's got an image already\r\n import_from_xml(\r\n module_store,\r\n 'common/test/data/',\r\n ['conditional_and_poll'],\r\n static_content_store=content_store\r\n )\r\n\r\n course = module_store.get_courses()[0]\r\n\r\n # Make sure the course image is set to the right place\r\n self.assertEqual(course.course_image, 'images_course_image.jpg')\r\n\r\n # Ensure that the imported course image is present -- this shouldn't raise an exception\r\n asset_key = course.id.make_asset_key('asset', course.course_image)\r\n content_store.find(asset_key)", "def testImageHandling(self):\n \n pm = getToolByName(self.portal, 'portal_membership')\n #make sure the person's member portrait isn't defined\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n # Delete the (nonexistant) image, make sure the portrait stays undefined\n self.person.setImage('DELETE_IMAGE')\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n self.person.setImage(TEST_GIF, content_type=\"image/gif\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_GIF)\n # Try to get a 10x10 version of the image\n imageOfSizeTag = self.person.getImageOfSize(10, 10)\n self.failUnlessEqual(imageOfSizeTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"10\" width=\"10\" />')\n self.failUnlessEqual(pm.getPersonalPortrait('abc123').__name__, 'abc123')\n \n # Try to get a scaled-by-ratio image with a width of 100.\n scaledImageTag = self.person.getScaledImageByWidth(100)\n self.failUnlessEqual(scaledImageTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"150\" width=\"100\" />')\n \n # Delete the image, make sure the portrait is deleted as well\n self.person.setImage('DELETE_IMAGE')\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n #self.person.setImage(TEST_JPEG, content_type=\"image/jpeg\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_JPEG)\n \n self.person.setImage(TEST_TIFF, content_type=\"image/tiff\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_TIFF)\n # Try to get a 10x10 
version of the image\n imageOfSizeTag = self.person.getImageOfSize(10, 10)\n self.failUnlessEqual(imageOfSizeTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"10\" width=\"10\" />')\n \n # Try to get a scaled-by-ratio image with a width of 100.\n # TIFF handling in Plone is broken (probably the fault of PIL), handle the problem nicely.\n scaledImageTag = self.person.getScaledImageByWidth(100)\n self.failUnless(scaledImageTag == '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"150\" width=\"100\" />' or scaledImageTag == '')", "def test_get_all_accessible_by_hash_list_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.global_template.hash], request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in templates))", "def test_ami_exists(self) -> None:\n owner = self.sts.get_caller_identity().get('Account')\n amis = self.ec2_client.describe_images(\n Owners=[owner],\n Filters=[{\n 'Name': 'name',\n 'Values': ['saints-xctf-web-server*']\n }]\n )\n self.assertTrue(len(amis.get('Images')) > 0)", "def test_should_render_with_user_in_read_only(self) -> None:\n self.request.user = User.objects.get(username='doc')\n\n # Turning on read-only mode prevents creation of some objects so call\n # _create_request_context first.\n request_context = self._create_request_context(user=self.request.user)\n\n settings = {\n 'site_read_only': True,\n }\n\n with override_feature_check(unified_banner_feature.feature_id, False):\n with self.siteconfig_settings(settings):\n if getattr(self, 'read_only_always_show', False):\n self.assertTrue(\n self.action.should_render(context=request_context))\n else:\n self.assertFalse(\n self.action.should_render(context=request_context))", "def test_not_logged_in(self):\n response = self.c.get(reverse(submit_point), {'lat': 34.0, 'lng': 45.3, 'zoom': 13})\n self.assertEqual(response.status_code, 403)", "def test_make_reusableitem_public_not_owner(self):\n\n # ensure is_public is false to start with\n original_reusableitem = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n original_reusableitem.is_public = False\n original_reusableitem.save()\n\n self.client.force_authenticate(user=self.user_2)\n\n response = self.client.patch(get_reusable_item_1_url(self), {'is_public': True}, format='json')\n\n # the request should fail\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_get_all_as_anonymous_with_access_right_returns_global_templates(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all(request=mock_request)\n self.assertEqual(templates.count(), 1)\n self.assertTrue((template.user is None for template in templates))", "def test_no_io_on_url():\n file = get_image_cache_file()\n file.url\n assert not file.storage.exists.called\n assert not file.storage.open.called", "def test_anon_private(self):\n self.do_visible(True, None, False)", "def test_get_all_accessible_by_hash_as_anonymous_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def 
glance_update_and_set_public(glance, image, image_info):\n image_properties = image_info['image_properties']\n try:\n logger.debug(\"glance image update: properties=%s\", image_properties)\n glance.images.update(image.id, **image_properties)\n logger.debug(\"glance image update: visibility=public\")\n glance.images.update(image.id, visibility='public')\n except Exception:\n logger.exception(\"Updating (-> public) Glance image '%s' [%s] failed\", image.name, image.id)\n return 1\n\n return 0", "def do_sharable(self, exp_res, img_owner, membership=None, **kwargs):\n\n img = FakeImage(img_owner, True)\n ctx = context.RequestContext(**kwargs)\n\n sharable_args = {}\n if membership is not None:\n sharable_args['membership'] = membership\n\n self.assertEqual(ctx.is_image_sharable(img, **sharable_args), exp_res)", "def test_api_thumbnail_instructor_read_detail_in_read_only(self):\n thumbnail = ThumbnailFactory()\n\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=thumbnail.video.playlist,\n permissions__can_update=False,\n )\n\n response = self.client.get(\n self._get_url(thumbnail.video, thumbnail),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)", "def test_blue_image_exists_or_not(self):\n response = self.app.get('/image/blue')\n if len(os.listdir(img_dir)):\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'blue image', response.data)\n else:\n self.assertEqual(response.status_code, 404)", "def noaccess(self):\n self.assertEqual(self.client.get(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.post(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.delete(self.ENDPOINT).status_code, 403)", "def testMissingImage(self):\n self.assertNotIn('no_image', self.data)", "def test_get_reusableitem_api_public(self):\n\n self.reusableitem_1.is_public = True\n self.reusableitem_1.save()\n\n self.client.logout()\n\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_users_photos_view_set_get_no_user(self):\n # Create user\n user = account_models.User.objects.create_user(email='mrtest@mypapaya.io', password='pass', username='aov_hov')\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n request = client.get('/api/users/{}/photos'.format(99999), format='json')\n\n self.assertEquals(request.status_code, 404)", "def create_image_allowed(self):\n return self._create_image_allowed", "def test_static_routes(self, request_client):\n rv = request_client.get(\"/static/images/Group.jpg\")\n assert \"200\" in str(rv.status)\n\n rv = request_client.get(\"/\")\n assert \"200\" in str(rv.status)", "def test_api_thumbnail_retrieve_by_consumer_site_any_role(self):\n consumer_site_access = ConsumerSiteAccessFactory(\n consumer_site=self.some_video.playlist.consumer_site,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n consumer_site_access.user, self.some_thumbnail\n )", "def test_blank_avatar_serving(self):\n response = self.client.get(reverse('misago:blank-avatar', kwargs={\n 'size': 150,\n }))\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], 'image/png')", "def test_api_thumbnail_retrieve_by_random_user(self):\n user = UserFactory()\n\n self.assert_user_cannot_retrieve_thumbnail(user, self.some_thumbnail)", "def simple_test(self, img, img_meta, **kwargs):\n pass", 
"def test_photo_classification_view_set_post_not_public(self):\n # Test data\n user = account_models.User.objects.get(email='mrtest@mypapaya.io')\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n payload = {\n 'name': 'Night',\n 'classification_type': 'tag',\n 'public': False\n }\n\n request = client.post('/api/photo_classifications', data=payload, format='json')\n\n self.assertEquals(request.status_code, 400)\n\n # Query for entry as well\n classifications = photo_models.PhotoClassification.objects.all()\n\n self.assertEquals(len(classifications), 11)", "def test_get_user_template_as_anonymous_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_not_authed_public_project(self):\n # Clear out existing project with ID=1 if necessary.\n Project.objects.filter(id=1).delete()\n locale = LocaleFactory.create(code='fakelocale')\n project = ProjectFactory.create(id=1, slug='valid-project', locales=[locale])\n ResourceFactory.create(project=project)\n\n response = self.client.get('/fakelocale/valid-project/')\n assert_equal(response.status_code, 200)\n # I'd assertTemplateUsed here but it doesn't work on non-DTL\n # templates.", "def test_news_index_no_perm(self):\n self.assertStatusCode(self.url, 403)", "def test_should_render_with_published(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context(public=True)))", "def test_no_permission(client):\n user = user_with_permissions()\n\n url = reverse(\"admin:index\")\n client.force_login(user)\n\n response = client.get(url)\n assert parse_sidemenu(response) == {\"Global\": [\"/en/admin/\"]}", "def test_api_thumbnail_read_detail_anonymous(self):\n video = VideoFactory()\n thumbnail = ThumbnailFactory(video=video)\n response = self.client.get(self._get_url(video, thumbnail))\n self.assertEqual(response.status_code, 401)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"Authentication credentials were not provided.\"}\n )" ]
[ "0.6889831", "0.67943805", "0.67671615", "0.6649116", "0.66382194", "0.6531554", "0.64865774", "0.6406382", "0.6406382", "0.62943596", "0.6245481", "0.62123114", "0.6179777", "0.61140406", "0.6112191", "0.6104334", "0.61026704", "0.6068213", "0.605546", "0.6054745", "0.5990862", "0.5967168", "0.5963532", "0.5924293", "0.59057504", "0.58848983", "0.5877516", "0.58750445", "0.5852969", "0.5811244", "0.5766195", "0.5727394", "0.57270914", "0.57023466", "0.5690992", "0.5688808", "0.56831557", "0.56824076", "0.5673566", "0.5659266", "0.56364363", "0.5629719", "0.5621988", "0.56169033", "0.5615864", "0.5607517", "0.55885184", "0.558458", "0.5574517", "0.55675954", "0.5567513", "0.5561838", "0.55596954", "0.55576825", "0.5553547", "0.5549761", "0.55314934", "0.5498259", "0.5489704", "0.54774827", "0.5474488", "0.5457479", "0.54518825", "0.5440521", "0.54395705", "0.54385525", "0.5434679", "0.5434461", "0.54336077", "0.5424886", "0.5423224", "0.5421345", "0.54211575", "0.54092836", "0.5408918", "0.5407439", "0.54044455", "0.5400117", "0.53939724", "0.53937745", "0.53930455", "0.53907335", "0.53904265", "0.5387467", "0.53836554", "0.5380659", "0.53738624", "0.5372553", "0.5372031", "0.5365092", "0.5359846", "0.5357994", "0.53555286", "0.534997", "0.53498137", "0.5348644", "0.5347754", "0.53471154", "0.53460485", "0.5345386" ]
0.65038705
6
Tests that an empty context (with is_admin set to True) can access an owned image with is_public set to True.
def test_empty_public_owned(self): self.do_visible(True, 'pattieblack', True, is_admin=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_empty_private_owned(self):\n self.do_visible(True, 'pattieblack', False, is_admin=True)", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_should_render_for_owner_unpublished(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context(\n public=False)))", "def test_auth_public_owned(self):\n self.do_visible(True, 'pattieblack', True, tenant='pattieblack')", "def do_visible(self, exp_res, img_owner, img_public, **kwargs):\n\n img = FakeImage(img_owner, img_public)\n ctx = context.RequestContext(**kwargs)\n\n self.assertEqual(ctx.is_image_visible(img), exp_res)", "def test_empty_public(self):\n self.do_visible(True, None, True, is_admin=True)", "def test_anon_public_owned(self):\n self.do_visible(True, 'pattieblack', True)", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_empty_private(self):\n self.do_visible(True, None, False, is_admin=True)", "def test_logged_in_owner(self):\n self.make_logged_in_owner()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u, album.display_photo, ALBUM_PRIVATE)", "def test_aws_service_api_private_image_get(self):\n pass", "def test_aws_service_api_private_images_get(self):\n pass", "def test_public_user(self):\n set_permission(Permission.SHARE, self.user1, self.collection)\n\n data = {\"public\": \"view\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"none\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"edit\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"share\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"owner\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def test_get_owner_image(self):\n\n # user1 is owner of image_id 1\n # user2 is owner of image ids (2,3)\n for image_id in range(1, 4):\n url = reverse(self.url_name_one, args=(image_id,))\n if image_id == 1:\n self.client.force_authenticate(self.user1)\n else:\n self.client.force_authenticate(self.user2)\n\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n if image_id == 1:\n self.assertEqual(response.data[\"owner\"], \"user1\")\n else:\n self.assertEqual(response.data[\"owner\"], \"user2\")\n\n # user2 try to get image_id 1 which is owner user1\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_auth_private_owned(self):\n self.do_visible(True, 'pattieblack', False, 
tenant='pattieblack')", "def test_anon_private_owned(self):\n self.do_visible(False, 'pattieblack', False)", "def test_aws_service_api_public_image_get(self):\n pass", "def test_admin_public(self):\n with self.login(self.user_admin):\n self.assertTrue(current_user.is_authenticated)\n self.assertEqual(current_user, self.user_admin)\n\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def test_permissions(self):\n taxonomy = self.get_taxonomy()\n return True if self.request.user == taxonomy.author else taxonomy.public", "def test_aws_service_api_public_images_get(self):\n pass", "def test_auth_public(self):\n self.do_visible(True, None, True, tenant='froggy')", "def test_filter_public_permissions(self):\n data = {\"public\": \"view\"}\n check_public_permissions(data)\n\n data = {\"public\": \"edit\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"share\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"owner\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def test_auth_private(self):\n self.do_visible(True, None, False, tenant='froggy')", "def test_filter_owner_permission(self):\n User = get_user_model()\n user1 = User.objects.create(username=\"test_user1\", email=\"user1@test.com\")\n obj = DescriptorSchema.objects.create(contributor=user1)\n obj.set_permission(Permission.VIEW, user1)\n\n data_template = {\n \"users\": {user1.id: \"view\"},\n \"groups\": {1: \"edit\", 2: \"NONE\"},\n }\n\n check_owner_permission(data_template, False, obj)\n\n # Check that only owner can set owner permission.\n data = deepcopy(data_template)\n data[\"users\"][1] = \"owner\"\n with self.assertRaises(exceptions.PermissionDenied):\n check_owner_permission(data, False, obj)\n check_owner_permission(data, True, obj)\n\n # Check that only owner can rewoke owner permission.\n obj.set_permission(Permission.OWNER, user1)\n data = deepcopy(data_template)\n data[\"users\"][1] = \"edit\"\n with self.assertRaises(exceptions.PermissionDenied):\n check_owner_permission(data, False, obj)\n check_owner_permission(data, True, obj)\n\n # Check that group can not be owner.\n obj.set_permission(Permission.VIEW, user1)\n data = deepcopy(data_template)\n data[\"groups\"][1] = \"owner\"\n with self.assertRaises(exceptions.ParseError):\n check_owner_permission(data, False, obj)\n with self.assertRaises(exceptions.ParseError):\n check_owner_permission(data, True, obj)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n 
festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def test_no_images_unauthorized(self):\n res = self.client.get(IMAGE_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)", "def test_show_host_not_exist(self):\n self.req.environ['cinder.context'].is_admin = True\n dest = 'dummydest'\n self.assertRaises(webob.exc.HTTPNotFound,\n self.controller.show,\n self.req, dest)", "def test_get_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_anon_public(self):\n self.do_visible(True, None, True)", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_api_thumbnail_retrieve_by_organization_administrator(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=ADMINISTRATOR,\n )\n\n self.assert_user_can_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_user_without_share(self):\n set_permission(Permission.EDIT, self.user1, self.collection)\n\n # Can not add permissions to users.\n data = {\"users\": {self.user2.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n # Can not add permissions to groups.\n data = {\"users\": {self.group.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def test_should_render_for_owner(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context()))", "def test_protect_owner(self):\n self.collection.set_permission(Permission.SHARE, self.user1)\n\n # User with share permission cannot grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertNotIn(\"owner\", self.collection.get_permissions(self.user2))\n self.assertFalse(PermissionModel.objects.filter(user=self.user2).exists())\n\n # User with share permission cannot revoke ``owner`` permission\n self.collection.set_permission(Permission.OWNER, self.user2)\n data = {\"users\": {self.user2.pk: \"editor\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n 
self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n self.collection.set_permission(Permission.NONE, self.user2)\n\n # Now let user1 be owner on collection.\n set_permission(Permission.OWNER, self.user1, self.collection)\n\n # ``owner`` permission cannot be assigned to a group\n data = {\"groups\": {self.group.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertFalse(PermissionModel.objects.filter(group=self.group).exists())\n\n # User with owner permission can grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n\n # User with owner permission can revoke ``owner`` permission\n data = {\"users\": {self.user2.pk: \"edit\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(\n PermissionModel.objects.filter(\n user=self.user2, value=Permission.OWNER.value\n ).exists()\n )\n\n # User with owner permission cannot remove all owners\n data = {\"users\": {self.user1.pk: \"edit\", self.owner.pk: \"edit\"}}\n\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(resp.data[\"detail\"], \"Object must have at least one owner.\")\n\n owner_permissions = self.collection.permission_group.permissions.filter(\n value=Permission.OWNER.value\n )\n owner_count = owner_permissions.count()\n self.assertEqual(owner_count, 2)\n\n # User can delete his owner permission if there is at least one other owner\n self.assertTrue(owner_permissions.filter(user=self.user1).exists())\n data = {\"users\": {self.user1.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(owner_permissions.filter(user=self.user1.pk).exists())", "def test_anonymous_public(self):\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def media_image_remotely_accessible(self) -> bool:\n return True", "def do_sharable(self, exp_res, img_owner, membership=None, **kwargs):\n\n img = FakeImage(img_owner, True)\n ctx = context.RequestContext(**kwargs)\n\n sharable_args = {}\n if membership is not None:\n sharable_args['membership'] = membership\n\n self.assertEqual(ctx.is_image_sharable(img, **sharable_args), exp_res)", "def every_existing_owner_should_have_valid_profile_image_link(context):\n items = context.response.json()['items']\n for item in items:\n owner = item['owner']\n if owner['user_type'] == 'does_not_exist':\n continue\n link = owner['profile_image']\n assert validators.url(link), (\n 'Owner %s (%d) in item %d has an invalid profile image link: %s'\n .format(owner['display_name'], owner['user_id'], link))\n logging.debug(\n 'Owner %s (%d) has a valid profile image link: %s',\n owner['display_name'], owner['user_id'], link)", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n 
self.fixture.global_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in templates))", "def every_non_existing_owner_should_not_have_profile_image_link(context):\n items = context.response.json()['items']\n for item in items:\n owner = item['owner']\n if not owner['user_type'] == 'does_not_exist':\n continue\n assert 'profile_image' not in owner\n logging.debug(\n 'Not existing Owner %s does not have a valid profile image link',\n owner['display_name'])", "def test_logged_in_not_friend(self):\n\n # log in\n self.make_logged_in_not_friend()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PUBLIC)", "async def cog_check(self, ctx:utils.Context):\n\n if ctx.author.id in self.bot.config['owners']:\n return True\n raise commands.NotOwner", "def test_get_all_accessible_by_id_list_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_id_list(\n self.template_id_list, request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in templates))", "def test_models_organization_get_abilities_owner(self):\n access = factories.UserOrganizationAccessFactory(role=\"owner\")\n abilities = access.organization.get_abilities(access.user)\n self.assertEqual(\n abilities,\n {\n \"delete\": True,\n \"get\": True,\n \"patch\": True,\n \"put\": True,\n \"manage_accesses\": True,\n },\n )", "def test_should_render_for_owner(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context(\n can_edit_reviewrequest=False)))", "def test_anon_private(self):\n self.do_visible(True, None, False)", "def test_detail_not_contributor_forbidden(self):\n self.login(self.user1)\n resp = self.client.get(self.get_url(self.c2.pk))\n self.assert403(resp)", "def test_is_owner_inherited_and_local(self):\n self.make_assignment(self.project, self.user_alice, self.role_owner)\n self.assertTrue(self.project.is_owner(self.user_alice))", "def test_user_visibility(app, resource):\n nethz = 'Something'\n with app.user(nethz=nethz):\n # Create fake signup with different nethz\n own = str(app.data.driver.db[resource].insert({'nethz': nethz}))\n other = str(app.data.driver.db[resource].insert({'nethz': 'trolo'}))\n\n # Resource: Can only see own, not both signups\n response = app.client.get('/' + resource, assert_status=200)\n assert len(response['_items']) == 1\n assert response['_items'][0]['nethz'] == nethz\n\n # Items\n own_url = '/%s/%s' % (resource, own)\n other_url = '/%s/%s' % (resource, other)\n\n # Get\n app.client.get(own_url, assert_status=200)\n app.client.get(other_url, assert_status=404)\n\n # Patch (if we can see item, we get 428 since etag is missing)\n app.client.patch(own_url, data={}, assert_status=428)\n app.client.patch(other_url, data={}, assert_status=404)\n\n # Delete (etag missing again)\n app.client.delete(own_url, assert_status=428)\n app.client.delete(other_url, 
assert_status=404)", "def test_get_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )", "def test_document_not_publicly_accessible(self):\n document = DocumentFactory(\n id=\"301b5f4f-b9f1-4a5f-897d-f8f1bf22c396\",\n playlist__title=\"playlist-003\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n is_public=False,\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n uploaded_on=\"2019-09-24 07:24:40+00\",\n )\n\n response = self.client.get(f\"/documents/{document.pk}\")\n\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"<html>\")\n content = response.content.decode(\"utf-8\")\n\n match = re.search(\n '<div id=\"marsha-frontend-data\" data-context=\"(.*)\">', content\n )\n\n context = json.loads(unescape(match.group(1)))\n\n self.assertIsNone(context.get(\"resource\"))\n self.assertEqual(context.get(\"state\"), \"error\")\n self.assertEqual(context.get(\"modelName\"), \"documents\")", "def test_make_reusableitem_public_not_owner(self):\n\n # ensure is_public is false to start with\n original_reusableitem = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n original_reusableitem.is_public = False\n original_reusableitem.save()\n\n self.client.force_authenticate(user=self.user_2)\n\n response = self.client.patch(get_reusable_item_1_url(self), {'is_public': True}, format='json')\n\n # the request should fail\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_admin_accessible(self) -> None:\n response = self.client.get(\"/admin/\")\n self.assertEqual(200, response.status_code)", "def test_get_reusableitem_api_not_public(self):\n\n # user not logged in\n self.client.logout()\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n # user logged in and created the Reusable Item\n self.client.force_authenticate(user=self.user_1)\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # user logged in and did not create the Reusable Item\n self.client.logout()\n self.client.force_authenticate(user=self.user_2)\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_public_status_page_remove_authorization_from_public_status_page(self):\n pass", "def test_get_user_template_as_anonymous_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_get_non_owner(self):\n another_user = CustomUser(id=101, email='another_user@mail.com', is_active=True)\n 
another_user.set_password('testpassword')\n another_user.save()\n self.client.login(email='another_user@mail.com', password='testpassword')\n\n url = reverse('route', kwargs={'way_id': self.route.way_id, 'route_id': self.route.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 403)", "def test_has_role_public(self):\n self.project.set_public()\n self.assertFalse(self.category.has_role(self.user_bob))\n self.assertTrue(self.project.has_role(self.user_bob))", "def test_has_object_read_permission_private(\n mock_parent_permission, api_rf, profile_factory\n):\n profile = profile_factory(is_private=True)\n request = api_rf.get(\"/\")\n\n expected = mock_parent_permission.return_value\n\n assert profile.has_object_read_permission(request) == expected\n assert mock_parent_permission.call_count == 1\n assert mock_parent_permission.call_args[0] == (request,)", "def test_get_all_accessible_by_hash_list_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.user1_template.hash], request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_no_owner(self):\n form, (case, ) = _create_case(domain=self.domain, case_id=uuid.uuid4().hex, owner_id=None)\n location = get_case_location(case)\n self.assertIsNone(location)", "def check_is_admin(context):\n init()\n\n #the target is user-self\n credentials = context.to_dict()\n target = credentials\n\n return policy.check('context_is_admin', target, credentials)", "def glance_list_owned_public_images(glance, owner_id, image_info):\n\n images = []\n list_kwargs = {'filters': {'visibility': 'public', 'owner': owner_id}}\n public_owned_images = glance.images.list(**list_kwargs)\n for image in public_owned_images:\n # only images with the \"same\" name ('TOTO' matches 'test_TOTO' or 'TOTO - 2016-10-03')\n if image_info['image_name'] in image.name:\n images.append(image)\n return images", "def test_public_status_page_add_authorization_to_public_status_page(self):\n pass", "def test_should_render_with_user_in_read_only(self) -> None:\n self.request.user = User.objects.get(username='doc')\n\n # Turning on read-only mode prevents creation of some objects so call\n # _create_request_context first.\n request_context = self._create_request_context(user=self.request.user)\n\n settings = {\n 'site_read_only': True,\n }\n\n with override_feature_check(unified_banner_feature.feature_id, False):\n with self.siteconfig_settings(settings):\n if getattr(self, 'read_only_always_show', False):\n self.assertTrue(\n self.action.should_render(context=request_context))\n else:\n self.assertFalse(\n self.action.should_render(context=request_context))", "def test__user_passed_as_none(self):\r\n access.has_access(None, 'staff', 'global', None)", "def test_auth_sharable_owned(self):\n self.do_sharable(True, 'pattieblack', None, tenant='pattieblack')", "def test_get_all_accessible_by_hash_list_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.global_template.hash], request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in templates))", "def test_admin_api_organization_accesses_request_authenticated(self):\n user = factories.UserFactory(is_staff=False, is_superuser=False)\n 
self.client.login(username=user.username, password=\"password\")\n organization = factories.OrganizationFactory()\n response = self.client.get(\n f\"/api/v1.0/admin/organizations/{organization.id}/accesses/\"\n )\n\n self.assertContains(\n response,\n \"You do not have permission to perform this action.\",\n status_code=403,\n )", "def test_ami_exists(self) -> None:\n owner = self.sts.get_caller_identity().get('Account')\n amis = self.ec2_client.describe_images(\n Owners=[owner],\n Filters=[{\n 'Name': 'name',\n 'Values': ['saints-xctf-web-server*']\n }]\n )\n self.assertTrue(len(amis.get('Images')) > 0)", "def test_get_all_accessible_by_hash_as_anonymous_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_should_render_with_unpublished(self) -> None:\n self.assertFalse(self.action.should_render(\n context=self._create_request_context(public=False)))", "def test_get_global_template_as_anonymous_with_access_right_returns_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n template = template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.global_template)", "def test_get_owned(self):\n user = User.create(name='foo', email='foo@bar.com')\n user.put()\n response = self.testapp.get(\n '/api/users/{}'.format(user.uid),\n headers=self.login_headers(user),\n )\n response_dict = json.loads(response.body)\n self.assertEqual(response_dict['uid'], user.uid)", "def test_without_whitelisted_ip(self, public_omis_api_client):\n order = OrderFactory()\n\n url = reverse(\n 'api-v3:public-omis:payment:collection',\n kwargs={'public_token': order.public_token},\n )\n public_omis_api_client.set_http_x_forwarded_for('1.1.1.1')\n response = public_omis_api_client.get(url)\n\n assert response.status_code == status.HTTP_401_UNAUTHORIZED", "def ft_syndicate_access():\n \n fake_user = FakeObject()\n fake_user.email = \"fakeuser@opencloud.us\"\n\n print \"\\nensure_user_exists(%s)\\n\" % fake_user.email\n ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )\n\n print \"\\nensure_user_exists(%s)\\n\" % fake_user.email\n ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )\n\n fake_volume = FakeObject()\n fake_volume.name = \"fakevolume\"\n fake_volume.description = \"This is a fake volume, created for funtional testing\"\n fake_volume.blocksize = 1024\n fake_volume.cap_read_data = True \n fake_volume.cap_write_data = True \n fake_volume.cap_host_data = False\n fake_volume.archive = False\n fake_volume.private = True\n \n # test idempotency\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n \n print \"\\nensure_volume_access_right_exists(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )\n \n print \"\\nensure_volume_access_right_exists(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )\n \n print \"\\nensure_volume_access_right_absent(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n 
ensure_volume_access_right_absent( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_access_right_absent(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_absent( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_user_absent(%s)\\n\" % fake_user.email\n ensure_user_absent( fake_user.email )\n\n print \"\\nensure_user_absent(%s)\\n\" % fake_user.email\n ensure_user_absent( fake_user.email )\n \n \n \n \n print \"\\nensure_principal_exists(%s)\\n\" % fake_user.email\n ensure_principal_exists( fake_user.email, \"asdf\", is_admin=False, max_UGs=1100, max_RGs=1 )\n \n print \"\\nensure_principal_exists(%s)\\n\" % fake_user.email\n ensure_principal_exists( fake_user.email, \"asdf\", is_admin=False, max_UGs=1100, max_RGs=1 )\n\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n\n print \"\\nsetup_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name)\n setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, \"abcdef\" )\n \n print \"\\nsetup_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name)\n setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, \"abcdef\" )\n \n print \"\\nteardown_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name )\n teardown_volume_access( fake_user.email, fake_volume.name )\n \n print \"\\nteardown_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name )\n teardown_volume_access( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_principal_absent(%s)\\n\" % fake_user.email\n ensure_principal_absent( fake_user.email )", "def test_get_private(self):\n owner = create_user('owner')\n create_snippet('foo', private=True, owner=owner)\n expected = [0, 0, 1, 1]\n\n def check(i):\n response = self.get()\n self.assertEqual(len(response.data), expected[i])\n\n self.check_for_users(check, owner)", "def test_delete_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_upsert_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_anything_else_is_accessible(api_client):\n\n assert api_client().get(\"/anything/else\").status_code == 200", "def test_checkread(self):\n user1 = {'uid': 1, 'gid': 1}\n self.assertTrue(self.m._checkread(user1, {}))\n mock_image = {\n 'userACL': None,\n 'groupACL': None\n }\n # Test a public image with ACLs set to None\n self.assertTrue(self.m._checkread(user1, mock_image))\n # Now empty list instead of None. 
Treat it the same way.\n mock_image['userACL'] = []\n mock_image['groupACL'] = []\n self.assertTrue(self.m._checkread(user1, mock_image))\n self.assertTrue(self.m._checkread(user1, {'private': False}))\n # Private false should trump other things\n self.assertTrue(self.m._checkread(user1,\n {'private': False, 'userACL': [2]}))\n self.assertTrue(self.m._checkread(user1,\n {'private': False, 'groupACL': [2]}))\n # Now check a protected image that the user should\n # have access to\n mock_image['userACL'] = [1]\n self.assertTrue(self.m._checkread(user1, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 2, 'gid': 1}, mock_image))\n # Now check by groupACL\n mock_image['groupACL'] = [1]\n self.assertTrue(self.m._checkread({'uid': 3, 'gid': 1}, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 3, 'gid': 2}, mock_image))\n # What about an image with a list\n mock_image = {\n 'userACL': [1, 2, 3],\n 'groupACL': [4, 5, 6]\n }\n self.assertTrue(self.m._checkread(user1, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 7, 'gid': 7}, mock_image))", "def test_get_non_owner(self):\n another_user = CustomUser(id=101, email='another_user@mail.com', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n self.client.login(email='another_user@mail.com', password='testpassword')\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 403)", "def test_logged_in_contributor(self):\n self.make_logged_in_contributor()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PRIVATE)", "def test_show_container_privilege(self):\n pass", "def test_pull_public_acl(self):\n # Use defaults for format, arch, os, ostcount, replication\n pr = {\n 'system': self.system,\n 'itype': self.itype,\n 'tag': self.tag,\n 'remotetype': 'dockerv2',\n 'userACL': [1001, 1002],\n 'groupACL': [1003, 1004]\n }\n # Do the pull\n session = self.m.new_session(self.auth, self.system)\n rec = self.m.pull(session, pr) # ,delay=False)\n id = rec['_id']\n self.assertIsNotNone(rec)\n # Confirm record\n q = {'system': self.system, 'itype': self.itype,\n 'pulltag': self.tag}\n state = self.time_wait(id)\n mrec = self.images.find_one(q)\n self.assertIn('_id', mrec)\n self.assertIn('userACL', mrec)\n self.assertIn('ENV', mrec)\n # Track through transistions\n state = self.time_wait(id)\n self.assertEqual(state, 'READY')\n mrec = self.images.find_one(q)\n self.assertIn('ENV', mrec)\n self.assertIn('private', mrec)\n self.assertFalse(mrec['private'])", "def test_private(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_public\": False, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"private\"})", "def testImageHandling(self):\n \n pm = getToolByName(self.portal, 'portal_membership')\n #make sure the person's member portrait isn't defined\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in 
['defaultUser.gif', 'defaultUser.png'])\n \n # Delete the (nonexistant) image, make sure the portrait stays undefined\n self.person.setImage('DELETE_IMAGE')\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n self.person.setImage(TEST_GIF, content_type=\"image/gif\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_GIF)\n # Try to get a 10x10 version of the image\n imageOfSizeTag = self.person.getImageOfSize(10, 10)\n self.failUnlessEqual(imageOfSizeTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"10\" width=\"10\" />')\n self.failUnlessEqual(pm.getPersonalPortrait('abc123').__name__, 'abc123')\n \n # Try to get a scaled-by-ratio image with a width of 100.\n scaledImageTag = self.person.getScaledImageByWidth(100)\n self.failUnlessEqual(scaledImageTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"150\" width=\"100\" />')\n \n # Delete the image, make sure the portrait is deleted as well\n self.person.setImage('DELETE_IMAGE')\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n #self.person.setImage(TEST_JPEG, content_type=\"image/jpeg\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_JPEG)\n \n self.person.setImage(TEST_TIFF, content_type=\"image/tiff\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_TIFF)\n # Try to get a 10x10 version of the image\n imageOfSizeTag = self.person.getImageOfSize(10, 10)\n self.failUnlessEqual(imageOfSizeTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"10\" width=\"10\" />')\n \n # Try to get a scaled-by-ratio image with a width of 100.\n # TIFF handling in Plone is broken (probably the fault of PIL), handle the problem nicely.\n scaledImageTag = self.person.getScaledImageByWidth(100)\n self.failUnless(scaledImageTag == '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"150\" width=\"100\" />' or scaledImageTag == '')", "def test_owner(self):\n self.assertIsNone(self.env.project_repo_owner)", "def test_get_other_users_template_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )", "def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator", "def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator", "def test_admin_view_access(request_ctx):\n user = User.get(email=\"root@test0.edu\")\n with request_ctx(\"/org_invitatin_summary\") as ctx:\n login_user(user, remember=True)\n rv = ctx.app.full_dispatch_request()\n assert rv.status_code == 200\n assert b\"<!DOCTYPE html>\" in rv.data, \"Expected HTML content\"\n assert b\"Organisation Invitation Summary\" in rv.data\n assert b\"root@test0.edu\" in rv.data", "def test_requires_privilege_no_current_role(self):\n @requires_privilege(self.zazzle_privilege.slug, domain='zizzle')\n def view(request, *args, **kwargs):\n pass\n\n request = HttpRequest()\n with self.assertRaises(PermissionDenied):\n view(request)" ]
[ "0.68287414", "0.68124455", "0.6661694", "0.65978193", "0.6575297", "0.6509962", "0.6473352", "0.64539516", "0.6439792", "0.63888246", "0.63792306", "0.6371669", "0.6366299", "0.6361131", "0.63585526", "0.62366015", "0.6124907", "0.6047954", "0.6000606", "0.5996451", "0.5975125", "0.5964795", "0.5952911", "0.59268355", "0.5913268", "0.59085023", "0.59085023", "0.5895989", "0.5867335", "0.5856853", "0.58070636", "0.57991976", "0.5791046", "0.5755171", "0.574756", "0.5738153", "0.573065", "0.5719062", "0.57190394", "0.5715734", "0.56930846", "0.56855905", "0.56762093", "0.5673772", "0.56519073", "0.56442267", "0.56299627", "0.56288487", "0.56236976", "0.56153584", "0.56118655", "0.5606984", "0.56027514", "0.5601977", "0.55888516", "0.5570084", "0.5568927", "0.55662125", "0.55637795", "0.5561662", "0.5561662", "0.55540174", "0.5552353", "0.554314", "0.5535131", "0.5518284", "0.55046254", "0.5500887", "0.5498299", "0.5468182", "0.54639506", "0.54596794", "0.54503095", "0.5446673", "0.54212624", "0.5419131", "0.5417108", "0.5402844", "0.53995425", "0.53973806", "0.53930324", "0.5392557", "0.53872496", "0.5379053", "0.5378532", "0.53738326", "0.5369264", "0.53665584", "0.53646654", "0.5363156", "0.5358063", "0.5348132", "0.5348045", "0.53305453", "0.53295875", "0.532814", "0.5322111", "0.5322111", "0.5321312", "0.53198457" ]
0.7047812
0
Tests that an empty context (with is_admin set to True) can access an image with is_public set to False.
def test_empty_private(self): self.do_visible(True, None, False, is_admin=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_visible(self, exp_res, img_owner, img_public, **kwargs):\n\n img = FakeImage(img_owner, img_public)\n ctx = context.RequestContext(**kwargs)\n\n self.assertEqual(ctx.is_image_visible(img), exp_res)", "def test_aws_service_api_private_images_get(self):\n pass", "def test_aws_service_api_private_image_get(self):\n pass", "def test_no_images_unauthorized(self):\n res = self.client.get(IMAGE_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_empty_public(self):\n self.do_visible(True, None, True, is_admin=True)", "def test_admin_public(self):\n with self.login(self.user_admin):\n self.assertTrue(current_user.is_authenticated)\n self.assertEqual(current_user, self.user_admin)\n\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def test_aws_service_api_public_image_get(self):\n pass", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_aws_service_api_public_images_get(self):\n pass", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def test_empty_public_owned(self):\n self.do_visible(True, 'pattieblack', True, is_admin=True)", "def test_auth_private(self):\n self.do_visible(True, None, False, tenant='froggy')", "def test_image_no_login(self):\n\n result = self.client.get(\"/select_image\", follow_redirects=True)\n\n self.assertIn(b\"Password\", result.data)", "def test_document_not_publicly_accessible(self):\n document = DocumentFactory(\n id=\"301b5f4f-b9f1-4a5f-897d-f8f1bf22c396\",\n playlist__title=\"playlist-003\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n is_public=False,\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n uploaded_on=\"2019-09-24 07:24:40+00\",\n )\n\n response = self.client.get(f\"/documents/{document.pk}\")\n\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"<html>\")\n content = response.content.decode(\"utf-8\")\n\n match = re.search(\n '<div id=\"marsha-frontend-data\" data-context=\"(.*)\">', content\n )\n\n context = json.loads(unescape(match.group(1)))\n\n self.assertIsNone(context.get(\"resource\"))\n self.assertEqual(context.get(\"state\"), \"error\")\n self.assertEqual(context.get(\"modelName\"), \"documents\")", "def test_should_render_for_owner_unpublished(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context(\n public=False)))", "def test_auth_public(self):\n self.do_visible(True, None, True, tenant='froggy')", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_filter_public_permissions(self):\n data = {\"public\": \"view\"}\n check_public_permissions(data)\n\n data = {\"public\": \"edit\"}\n with 
self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"share\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"owner\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)", "def media_image_remotely_accessible(self) -> bool:\n return True", "def test_empty_private_owned(self):\n self.do_visible(True, 'pattieblack', False, is_admin=True)", "def test_anonymous_public(self):\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def test_public_user(self):\n set_permission(Permission.SHARE, self.user1, self.collection)\n\n data = {\"public\": \"view\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"none\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"edit\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"share\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"owner\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def test_auth_public_owned(self):\n self.do_visible(True, 'pattieblack', True, tenant='pattieblack')", "def test_un_logged_in_user_can_not_upload_picture(self):\n tmp_file = generate_image_for_testing()\n response = self.client.post(self.user_passport_url,\n data={'passport': tmp_file})\n\n self.assertEqual(403, response.status_code)", "def test_show_host_not_exist(self):\n self.req.environ['cinder.context'].is_admin = True\n dest = 'dummydest'\n self.assertRaises(webob.exc.HTTPNotFound,\n self.controller.show,\n self.req, dest)", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_should_render_with_unpublished(self) -> None:\n self.assertFalse(self.action.should_render(\n context=self._create_request_context(public=False)))", "def test_admin_accessible(self) -> None:\n response = self.client.get(\"/admin/\")\n self.assertEqual(200, response.status_code)", "def test_api_thumbnail_retrieve_by_organization_administrator(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=ADMINISTRATOR,\n )\n\n self.assert_user_can_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_image_display(self):\n\n result = self.client.get(\"/select_image\")\n\n self.assertIn(b\"/static/uploads/girl-glowing-skin-blue-eyes.jpg\", result.data)", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.global_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 
1)\n self.assertTrue((template.user is None for template in templates))", "def test_permissions(self):\n taxonomy = self.get_taxonomy()\n return True if self.request.user == taxonomy.author else taxonomy.public", "def test_public_status_page_remove_authorization_from_public_status_page(self):\n pass", "def test_auth_private_owned(self):\n self.do_visible(True, 'pattieblack', False, tenant='pattieblack')", "def test_api_thumbnail_administrator_read_detail_in_read_only(self):\n thumbnail = ThumbnailFactory()\n\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=thumbnail.video.playlist,\n permissions__can_update=False,\n )\n\n response = self.client.get(\n self._get_url(thumbnail.video, thumbnail),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)", "def test_cant_create_image_unauthorized(self):\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (10, 10))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n res = self.client.post(IMAGE_URL, data={'image': ntf},\n format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_get_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )", "def test_04_admin_featured_apps_as_anonymous(self):\r\n res = self.app.get('/admin/featured', follow_redirects=True)\r\n assert \"Please sign in to access this page\" in res.data, res.data", "def test_anon_public(self):\n self.do_visible(True, None, True)", "def test_get_all_accessible_by_id_list_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_id_list(\n self.template_id_list, request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in templates))", "def test_get_reusableitem_api_not_public(self):\n\n # user not logged in\n self.client.logout()\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n # user logged in and created the Reusable Item\n self.client.force_authenticate(user=self.user_1)\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # user logged in and did not create the Reusable Item\n self.client.logout()\n self.client.force_authenticate(user=self.user_2)\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_private(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_public\": False, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"private\"})", "def test_logged_in_not_friend(self):\n\n # log in\n self.make_logged_in_not_friend()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, 
self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PUBLIC)", "def test_get_owner_image(self):\n\n # user1 is owner of image_id 1\n # user2 is owner of image ids (2,3)\n for image_id in range(1, 4):\n url = reverse(self.url_name_one, args=(image_id,))\n if image_id == 1:\n self.client.force_authenticate(self.user1)\n else:\n self.client.force_authenticate(self.user2)\n\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n if image_id == 1:\n self.assertEqual(response.data[\"owner\"], \"user1\")\n else:\n self.assertEqual(response.data[\"owner\"], \"user2\")\n\n # user2 try to get image_id 1 which is owner user1\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_only_edit_perm(self):\n self.assertStatusCode(self.url, 403)", "def test_should_render_with_user_in_read_only(self) -> None:\n self.request.user = User.objects.get(username='doc')\n\n # Turning on read-only mode prevents creation of some objects so call\n # _create_request_context first.\n request_context = self._create_request_context(user=self.request.user)\n\n settings = {\n 'site_read_only': True,\n }\n\n with override_feature_check(unified_banner_feature.feature_id, False):\n with self.siteconfig_settings(settings):\n if getattr(self, 'read_only_always_show', False):\n self.assertTrue(\n self.action.should_render(context=request_context))\n else:\n self.assertFalse(\n self.action.should_render(context=request_context))", "def test_no_permission(self):\n override_acl(self.user, {'can_use_private_threads': 0})\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"t use private threads\", status_code=403)", "def test_get_all_accessible_by_hash_list_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.user1_template.hash], request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_get_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_not_logged_in(self):\n response = self.c.get(reverse(submit_point), {'lat': 34.0, 'lng': 45.3, 'zoom': 13})\n self.assertEqual(response.status_code, 403)", "def noaccess(self):\n self.assertEqual(self.client.get(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.post(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.delete(self.ENDPOINT).status_code, 403)", "def create_image_allowed(self):\n return self._create_image_allowed", "def test_photo_classification_view_set_get_public(self):\n # Test data\n user = account_models.User.objects.get(email='mrtest@mypapaya.io')\n\n photo_models.PhotoClassification.objects.create_or_update(name='City', public=False)\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n request = client.get('/api/photo_classifications')\n results = request.data['results']\n\n self.assertEquals(len(results), 13)", "def test_an_admin_view_anonymous(client):\n response = client.get('/admin/')\n 
assert status(response) == 'found'\n assert response.url.startswith('/admin/login/')", "def test_get_all_accessible_by_hash_as_anonymous_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_anything_else_is_accessible(api_client):\n\n assert api_client().get(\"/anything/else\").status_code == 200", "def test_anon_private(self):\n self.do_visible(True, None, False)", "def test_public(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_public\": True, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"public\"})", "def test_public_status_page_add_authorization_to_public_status_page(self):\n pass", "def test_auth_required(self, api_client):\n res = api_client.get(PHOTO_URL)\n\n assert res.status_code == status.HTTP_401_UNAUTHORIZED", "def test_default_publish(self):\n self.assertIs(self.photo.published, 'Public')", "def test_no_permission(self):\n override_acl(self.user, {'can_use_private_threads': 0})\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"can't use private threads\", status_code=403)", "def test_get_all_as_anonymous_with_access_right_returns_global_templates(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all(request=mock_request)\n self.assertEqual(templates.count(), 1)\n self.assertTrue((template.user is None for template in templates))", "def test_get_global_template_as_anonymous_with_access_right_returns_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n template = template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.global_template)", "def test_get_all_accessible_by_hash_list_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.global_template.hash], request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in templates))", "def test_make_reusableitem_public_not_owner(self):\n\n # ensure is_public is false to start with\n original_reusableitem = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n original_reusableitem.is_public = False\n original_reusableitem.save()\n\n self.client.force_authenticate(user=self.user_2)\n\n response = self.client.patch(get_reusable_item_1_url(self), {'is_public': True}, format='json')\n\n # the request should fail\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_should_render_with_superuser_in_read_only(self) -> None:\n self.request.user = User.objects.get(username='admin')\n\n # Turning on read-only mode prevents creation of some objects so call\n # _create_request_context first.\n request_context = self._create_request_context(user=self.request.user)\n\n settings = {\n 'site_read_only': True,\n }\n\n with override_feature_check(unified_banner_feature.feature_id, False):\n with self.siteconfig_settings(settings):\n self.assertTrue(\n self.action.should_render(context=request_context))", "def test_anon_public_owned(self):\n self.do_visible(True, 'pattieblack', True)", "def test__user_passed_as_none(self):\r\n 
access.has_access(None, 'staff', 'global', None)", "def test_logged_in_owner(self):\n self.make_logged_in_owner()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u, album.display_photo, ALBUM_PRIVATE)", "def test_get_all_accessible_by_hash_as_anonymous_does_not_return_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.global_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def testimage_handler(self):\n\t\t\n\t\tthings = Thing.objects.all()\n\t\tif len( things ):\n\t\t\tthing = things[0]\n\t\telse:\n\t\t\tc = Client()\n\t\t\tdata = parse_qs( 'title=&tags=&lattitude=32.82248&longitude=-96.762986&duration=&parent=&privacy=U&lifespan=&format=txt' )\n\t\t\tdata[ 'media' ] = open( MEDIA_ROOT + 'unittest_image.jpg' )\n\t\t\tc.post( '/api/place/', data )\n\t\t\t\n\t\t\tthing = Thing.objects.all()[0]\n\n\t\t\n\t\turi = thing.media.replace( 'http://' + DOMAIN, '' )\n\t\t\n\t\tc = Client()\n\t\tresponse = c.get( uri )\n\t\tself.failUnlessEqual(response.status_code, 200)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_no_permission(client):\n user = user_with_permissions()\n\n url = reverse(\"admin:index\")\n client.force_login(user)\n\n response = client.get(url)\n assert parse_sidemenu(response) == {\"Global\": [\"/en/admin/\"]}", "def test_no_io_on_bool():\n file = get_image_cache_file()\n bool(file)\n assert not file.storage.exists.called\n assert not file.storage.open.called", "def test_no_io_on_url():\n file = get_image_cache_file()\n file.url\n assert not file.storage.exists.called\n assert not file.storage.open.called", "def testMissingImage(self):\n self.assertNotIn('no_image', self.data)", "def check_is_admin(context):\n init()\n\n #the target is user-self\n credentials = context.to_dict()\n target = credentials\n\n return policy.check('context_is_admin', target, credentials)", "def test_checkread(self):\n user1 = {'uid': 1, 'gid': 1}\n self.assertTrue(self.m._checkread(user1, {}))\n mock_image = {\n 'userACL': None,\n 'groupACL': None\n }\n # Test a public image with ACLs set to None\n self.assertTrue(self.m._checkread(user1, mock_image))\n # Now empty list instead of None. 
Treat it the same way.\n mock_image['userACL'] = []\n mock_image['groupACL'] = []\n self.assertTrue(self.m._checkread(user1, mock_image))\n self.assertTrue(self.m._checkread(user1, {'private': False}))\n # Private false should trump other things\n self.assertTrue(self.m._checkread(user1,\n {'private': False, 'userACL': [2]}))\n self.assertTrue(self.m._checkread(user1,\n {'private': False, 'groupACL': [2]}))\n # Now check a protected image that the user should\n # have access to\n mock_image['userACL'] = [1]\n self.assertTrue(self.m._checkread(user1, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 2, 'gid': 1}, mock_image))\n # Now check by groupACL\n mock_image['groupACL'] = [1]\n self.assertTrue(self.m._checkread({'uid': 3, 'gid': 1}, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 3, 'gid': 2}, mock_image))\n # What about an image with a list\n mock_image = {\n 'userACL': [1, 2, 3],\n 'groupACL': [4, 5, 6]\n }\n self.assertTrue(self.m._checkread(user1, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 7, 'gid': 7}, mock_image))", "def test_unauthenticated_get(self):\n url = reverse('edit-list')\n\n response = self.client.get(url)\n self.assertEqual(403, response.status_code)\n self.assertEqual('Forbidden', response.status_text)\n self.assertTrue(\n 'credentials were not provided.' in response.data.get('detail'))", "def test_api_thumbnail_retrieve_by_organization_student(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=STUDENT,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_users_photos_view_set_get_no_user(self):\n # Create user\n user = account_models.User.objects.create_user(email='mrtest@mypapaya.io', password='pass', username='aov_hov')\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n request = client.get('/api/users/{}/photos'.format(99999), format='json')\n\n self.assertEquals(request.status_code, 404)", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def test_news_index_no_perm(self):\n self.assertStatusCode(self.url, 403)", "def submit_image_no_login(self):\n\n result = self.client.get(\"/submit_image\", follow_redirects=True)\n self.assertIn(b\"Password\", result.data)", "def check_is_admin(context):\n\n init()\n # the target is user-self\n target = default_target(context)\n return _ENFORCER.authorize('context_is_admin', target, context)", "def test_detail_not_contributor_forbidden(self):\n self.login(self.user1)\n resp = self.client.get(self.get_url(self.c2.pk))\n self.assert403(resp)", "def test_admin(self):\r\n \r\n self.assertEqual(False, self.user.isAdmin)", "def test_has_role_public(self):\n self.project.set_public()\n self.assertFalse(self.category.has_role(self.user_bob))\n self.assertTrue(self.project.has_role(self.user_bob))", "def test_allowed_if_superuser(self):\n\n @task_or_superuser_only\n def view(request):\n return HttpResponse(\"Hello\")\n\n class User(object):\n is_superuser = True\n is_authenticated = True\n\n request = self.factory.get(\"/\")\n request.user = None\n response = view(request)\n 
self.assertEqual(response.status_code, 403)\n\n request.user = User()\n response = view(request)\n self.assertEqual(response.status_code, 200)", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_aws_service_api_image_get(self):\n pass" ]
[ "0.6836172", "0.6736128", "0.67086965", "0.6513356", "0.65056294", "0.6466665", "0.64133674", "0.6399818", "0.6399818", "0.63390875", "0.6302523", "0.6226055", "0.61997557", "0.6121848", "0.6118551", "0.6102106", "0.6089454", "0.6087831", "0.6057525", "0.6023837", "0.5988064", "0.5981804", "0.59564173", "0.59529775", "0.5928504", "0.58769774", "0.5861309", "0.58524543", "0.5832807", "0.5832652", "0.5798373", "0.57948714", "0.57888496", "0.57504094", "0.57397836", "0.5738421", "0.5734089", "0.5721497", "0.563578", "0.5631878", "0.56304276", "0.5615635", "0.5609847", "0.5596449", "0.5595076", "0.55613196", "0.5555593", "0.5542774", "0.5540809", "0.55325955", "0.5531463", "0.5526257", "0.5520916", "0.5519858", "0.55079645", "0.5507475", "0.5507228", "0.5504407", "0.5503828", "0.5499077", "0.54950255", "0.549442", "0.5490237", "0.5487188", "0.54826146", "0.5470216", "0.5469601", "0.5468212", "0.54643637", "0.54572564", "0.5456872", "0.5449535", "0.5449345", "0.5446323", "0.5443379", "0.5443273", "0.5443273", "0.54402506", "0.5440084", "0.5437612", "0.5433786", "0.54332054", "0.5430351", "0.5430314", "0.5424414", "0.542257", "0.54146343", "0.54146343", "0.54146343", "0.54146343", "0.5414097", "0.5413787", "0.5413345", "0.5409995", "0.5407467", "0.54033935", "0.54024184", "0.54022735", "0.54022735", "0.5400276" ]
0.6399744
9
Tests that an empty context (with is_admin set to True) can access an owned image with is_public set to False.
def test_empty_private_owned(self): self.do_visible(True, 'pattieblack', False, is_admin=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_empty_public_owned(self):\n self.do_visible(True, 'pattieblack', True, is_admin=True)", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_should_render_for_owner_unpublished(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context(\n public=False)))", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_empty_private(self):\n self.do_visible(True, None, False, is_admin=True)", "def test_auth_public_owned(self):\n self.do_visible(True, 'pattieblack', True, tenant='pattieblack')", "def do_visible(self, exp_res, img_owner, img_public, **kwargs):\n\n img = FakeImage(img_owner, img_public)\n ctx = context.RequestContext(**kwargs)\n\n self.assertEqual(ctx.is_image_visible(img), exp_res)", "def test_empty_public(self):\n self.do_visible(True, None, True, is_admin=True)", "def test_anon_public_owned(self):\n self.do_visible(True, 'pattieblack', True)", "def test_auth_private_owned(self):\n self.do_visible(True, 'pattieblack', False, tenant='pattieblack')", "def test_anon_private_owned(self):\n self.do_visible(False, 'pattieblack', False)", "def test_logged_in_owner(self):\n self.make_logged_in_owner()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u, album.display_photo, ALBUM_PRIVATE)", "def test_aws_service_api_private_image_get(self):\n pass", "def test_aws_service_api_private_images_get(self):\n pass", "def test_get_owner_image(self):\n\n # user1 is owner of image_id 1\n # user2 is owner of image ids (2,3)\n for image_id in range(1, 4):\n url = reverse(self.url_name_one, args=(image_id,))\n if image_id == 1:\n self.client.force_authenticate(self.user1)\n else:\n self.client.force_authenticate(self.user2)\n\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n if image_id == 1:\n self.assertEqual(response.data[\"owner\"], \"user1\")\n else:\n self.assertEqual(response.data[\"owner\"], \"user2\")\n\n # user2 try to get image_id 1 which is owner user1\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_public_user(self):\n set_permission(Permission.SHARE, self.user1, self.collection)\n\n data = {\"public\": \"view\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"none\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"edit\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"share\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"owner\"}\n resp = 
self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def test_permissions(self):\n taxonomy = self.get_taxonomy()\n return True if self.request.user == taxonomy.author else taxonomy.public", "def test_auth_private(self):\n self.do_visible(True, None, False, tenant='froggy')", "def test_filter_owner_permission(self):\n User = get_user_model()\n user1 = User.objects.create(username=\"test_user1\", email=\"user1@test.com\")\n obj = DescriptorSchema.objects.create(contributor=user1)\n obj.set_permission(Permission.VIEW, user1)\n\n data_template = {\n \"users\": {user1.id: \"view\"},\n \"groups\": {1: \"edit\", 2: \"NONE\"},\n }\n\n check_owner_permission(data_template, False, obj)\n\n # Check that only owner can set owner permission.\n data = deepcopy(data_template)\n data[\"users\"][1] = \"owner\"\n with self.assertRaises(exceptions.PermissionDenied):\n check_owner_permission(data, False, obj)\n check_owner_permission(data, True, obj)\n\n # Check that only owner can rewoke owner permission.\n obj.set_permission(Permission.OWNER, user1)\n data = deepcopy(data_template)\n data[\"users\"][1] = \"edit\"\n with self.assertRaises(exceptions.PermissionDenied):\n check_owner_permission(data, False, obj)\n check_owner_permission(data, True, obj)\n\n # Check that group can not be owner.\n obj.set_permission(Permission.VIEW, user1)\n data = deepcopy(data_template)\n data[\"groups\"][1] = \"owner\"\n with self.assertRaises(exceptions.ParseError):\n check_owner_permission(data, False, obj)\n with self.assertRaises(exceptions.ParseError):\n check_owner_permission(data, True, obj)", "def test_aws_service_api_public_image_get(self):\n pass", "def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_admin_public(self):\n with self.login(self.user_admin):\n self.assertTrue(current_user.is_authenticated)\n self.assertEqual(current_user, self.user_admin)\n\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def 
test_no_images_unauthorized(self):\n res = self.client.get(IMAGE_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_filter_public_permissions(self):\n data = {\"public\": \"view\"}\n check_public_permissions(data)\n\n data = {\"public\": \"edit\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"share\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"owner\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)", "def test_auth_public(self):\n self.do_visible(True, None, True, tenant='froggy')", "def test_user_without_share(self):\n set_permission(Permission.EDIT, self.user1, self.collection)\n\n # Can not add permissions to users.\n data = {\"users\": {self.user2.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n # Can not add permissions to groups.\n data = {\"users\": {self.group.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_get_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def test_aws_service_api_public_images_get(self):\n pass", "def test_show_host_not_exist(self):\n self.req.environ['cinder.context'].is_admin = True\n dest = 'dummydest'\n self.assertRaises(webob.exc.HTTPNotFound,\n self.controller.show,\n self.req, dest)", "def test_anon_public(self):\n self.do_visible(True, None, True)", "def media_image_remotely_accessible(self) -> bool:\n return True", "def test_protect_owner(self):\n self.collection.set_permission(Permission.SHARE, self.user1)\n\n # User with share permission cannot grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertNotIn(\"owner\", self.collection.get_permissions(self.user2))\n self.assertFalse(PermissionModel.objects.filter(user=self.user2).exists())\n\n # User with share permission cannot revoke ``owner`` permission\n self.collection.set_permission(Permission.OWNER, self.user2)\n data = {\"users\": {self.user2.pk: \"editor\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n self.collection.set_permission(Permission.NONE, self.user2)\n\n # Now let user1 be owner on collection.\n set_permission(Permission.OWNER, self.user1, self.collection)\n\n # ``owner`` permission cannot be assigned to a group\n 
data = {\"groups\": {self.group.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertFalse(PermissionModel.objects.filter(group=self.group).exists())\n\n # User with owner permission can grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n\n # User with owner permission can revoke ``owner`` permission\n data = {\"users\": {self.user2.pk: \"edit\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(\n PermissionModel.objects.filter(\n user=self.user2, value=Permission.OWNER.value\n ).exists()\n )\n\n # User with owner permission cannot remove all owners\n data = {\"users\": {self.user1.pk: \"edit\", self.owner.pk: \"edit\"}}\n\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(resp.data[\"detail\"], \"Object must have at least one owner.\")\n\n owner_permissions = self.collection.permission_group.permissions.filter(\n value=Permission.OWNER.value\n )\n owner_count = owner_permissions.count()\n self.assertEqual(owner_count, 2)\n\n # User can delete his owner permission if there is at least one other owner\n self.assertTrue(owner_permissions.filter(user=self.user1).exists())\n data = {\"users\": {self.user1.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(owner_permissions.filter(user=self.user1.pk).exists())", "def every_non_existing_owner_should_not_have_profile_image_link(context):\n items = context.response.json()['items']\n for item in items:\n owner = item['owner']\n if not owner['user_type'] == 'does_not_exist':\n continue\n assert 'profile_image' not in owner\n logging.debug(\n 'Not existing Owner %s does not have a valid profile image link',\n owner['display_name'])", "def test_logged_in_not_friend(self):\n\n # log in\n self.make_logged_in_not_friend()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PUBLIC)", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.global_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in templates))", "def test_should_render_for_owner(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context()))", "def test_anon_private(self):\n self.do_visible(True, None, False)", "def 
test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "async def cog_check(self, ctx:utils.Context):\n\n if ctx.author.id in self.bot.config['owners']:\n return True\n raise commands.NotOwner", "def test_should_render_for_owner(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context(\n can_edit_reviewrequest=False)))", "def test_detail_not_contributor_forbidden(self):\n self.login(self.user1)\n resp = self.client.get(self.get_url(self.c2.pk))\n self.assert403(resp)", "def do_sharable(self, exp_res, img_owner, membership=None, **kwargs):\n\n img = FakeImage(img_owner, True)\n ctx = context.RequestContext(**kwargs)\n\n sharable_args = {}\n if membership is not None:\n sharable_args['membership'] = membership\n\n self.assertEqual(ctx.is_image_sharable(img, **sharable_args), exp_res)", "def test_api_thumbnail_retrieve_by_organization_administrator(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=ADMINISTRATOR,\n )\n\n self.assert_user_can_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_get_all_accessible_by_id_list_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_id_list(\n self.template_id_list, request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in templates))", "def test_public_status_page_remove_authorization_from_public_status_page(self):\n pass", "def test_models_organization_get_abilities_owner(self):\n access = factories.UserOrganizationAccessFactory(role=\"owner\")\n abilities = access.organization.get_abilities(access.user)\n self.assertEqual(\n abilities,\n {\n \"delete\": True,\n \"get\": True,\n \"patch\": True,\n \"put\": True,\n \"manage_accesses\": True,\n },\n )", "def test_get_non_owner(self):\n another_user = CustomUser(id=101, email='another_user@mail.com', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n self.client.login(email='another_user@mail.com', password='testpassword')\n\n url = reverse('route', kwargs={'way_id': self.route.way_id, 'route_id': self.route.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 403)", "def test_get_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )", "def every_existing_owner_should_have_valid_profile_image_link(context):\n items = context.response.json()['items']\n for item in items:\n owner = item['owner']\n if owner['user_type'] == 'does_not_exist':\n continue\n link = owner['profile_image']\n assert validators.url(link), (\n 'Owner %s (%d) in item %d has an invalid profile image link: %s'\n .format(owner['display_name'], owner['user_id'], link))\n 
logging.debug(\n 'Owner %s (%d) has a valid profile image link: %s',\n owner['display_name'], owner['user_id'], link)", "def test_make_reusableitem_public_not_owner(self):\n\n # ensure is_public is false to start with\n original_reusableitem = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n original_reusableitem.is_public = False\n original_reusableitem.save()\n\n self.client.force_authenticate(user=self.user_2)\n\n response = self.client.patch(get_reusable_item_1_url(self), {'is_public': True}, format='json')\n\n # the request should fail\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def check_is_admin(context):\n init()\n\n #the target is user-self\n credentials = context.to_dict()\n target = credentials\n\n return policy.check('context_is_admin', target, credentials)", "def test_user_visibility(app, resource):\n nethz = 'Something'\n with app.user(nethz=nethz):\n # Create fake signup with different nethz\n own = str(app.data.driver.db[resource].insert({'nethz': nethz}))\n other = str(app.data.driver.db[resource].insert({'nethz': 'trolo'}))\n\n # Resource: Can only see own, not both signups\n response = app.client.get('/' + resource, assert_status=200)\n assert len(response['_items']) == 1\n assert response['_items'][0]['nethz'] == nethz\n\n # Items\n own_url = '/%s/%s' % (resource, own)\n other_url = '/%s/%s' % (resource, other)\n\n # Get\n app.client.get(own_url, assert_status=200)\n app.client.get(other_url, assert_status=404)\n\n # Patch (if we can see item, we get 428 since etag is missing)\n app.client.patch(own_url, data={}, assert_status=428)\n app.client.patch(other_url, data={}, assert_status=404)\n\n # Delete (etag missing again)\n app.client.delete(own_url, assert_status=428)\n app.client.delete(other_url, assert_status=404)", "def test_anonymous_public(self):\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def test_document_not_publicly_accessible(self):\n document = DocumentFactory(\n id=\"301b5f4f-b9f1-4a5f-897d-f8f1bf22c396\",\n playlist__title=\"playlist-003\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n is_public=False,\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n uploaded_on=\"2019-09-24 07:24:40+00\",\n )\n\n response = self.client.get(f\"/documents/{document.pk}\")\n\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"<html>\")\n content = response.content.decode(\"utf-8\")\n\n match = re.search(\n '<div id=\"marsha-frontend-data\" data-context=\"(.*)\">', content\n )\n\n context = json.loads(unescape(match.group(1)))\n\n self.assertIsNone(context.get(\"resource\"))\n self.assertEqual(context.get(\"state\"), \"error\")\n self.assertEqual(context.get(\"modelName\"), \"documents\")", "def test_is_owner_inherited_and_local(self):\n self.make_assignment(self.project, self.user_alice, self.role_owner)\n self.assertTrue(self.project.is_owner(self.user_alice))", "def test_get_user_template_as_anonymous_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_get_all_accessible_by_hash_list_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.user1_template.hash], request=mock_request\n )\n self.assertTrue(templates.count() 
== 0)", "def test_should_render_with_user_in_read_only(self) -> None:\n self.request.user = User.objects.get(username='doc')\n\n # Turning on read-only mode prevents creation of some objects so call\n # _create_request_context first.\n request_context = self._create_request_context(user=self.request.user)\n\n settings = {\n 'site_read_only': True,\n }\n\n with override_feature_check(unified_banner_feature.feature_id, False):\n with self.siteconfig_settings(settings):\n if getattr(self, 'read_only_always_show', False):\n self.assertTrue(\n self.action.should_render(context=request_context))\n else:\n self.assertFalse(\n self.action.should_render(context=request_context))", "def test__user_passed_as_none(self):\r\n access.has_access(None, 'staff', 'global', None)", "def test_no_owner(self):\n form, (case, ) = _create_case(domain=self.domain, case_id=uuid.uuid4().hex, owner_id=None)\n location = get_case_location(case)\n self.assertIsNone(location)", "def test_get_reusableitem_api_not_public(self):\n\n # user not logged in\n self.client.logout()\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n # user logged in and created the Reusable Item\n self.client.force_authenticate(user=self.user_1)\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # user logged in and did not create the Reusable Item\n self.client.logout()\n self.client.force_authenticate(user=self.user_2)\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_admin_accessible(self) -> None:\n response = self.client.get(\"/admin/\")\n self.assertEqual(200, response.status_code)", "def test_should_render_with_unpublished(self) -> None:\n self.assertFalse(self.action.should_render(\n context=self._create_request_context(public=False)))", "def test_has_object_read_permission_private(\n mock_parent_permission, api_rf, profile_factory\n):\n profile = profile_factory(is_private=True)\n request = api_rf.get(\"/\")\n\n expected = mock_parent_permission.return_value\n\n assert profile.has_object_read_permission(request) == expected\n assert mock_parent_permission.call_count == 1\n assert mock_parent_permission.call_args[0] == (request,)", "def test_get_all_accessible_by_hash_as_anonymous_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_get_non_owner(self):\n another_user = CustomUser(id=101, email='another_user@mail.com', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n self.client.login(email='another_user@mail.com', password='testpassword')\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 403)", "def test_has_role_public(self):\n self.project.set_public()\n self.assertFalse(self.category.has_role(self.user_bob))\n self.assertTrue(self.project.has_role(self.user_bob))", "def test_auth_sharable_owned(self):\n self.do_sharable(True, 'pattieblack', None, tenant='pattieblack')", "def test_get_all_accessible_by_hash_list_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request 
= create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.global_template.hash], request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in templates))", "def test_delete_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_without_whitelisted_ip(self, public_omis_api_client):\n order = OrderFactory()\n\n url = reverse(\n 'api-v3:public-omis:payment:collection',\n kwargs={'public_token': order.public_token},\n )\n public_omis_api_client.set_http_x_forwarded_for('1.1.1.1')\n response = public_omis_api_client.get(url)\n\n assert response.status_code == status.HTTP_401_UNAUTHORIZED", "def test_admin_api_organization_accesses_request_authenticated(self):\n user = factories.UserFactory(is_staff=False, is_superuser=False)\n self.client.login(username=user.username, password=\"password\")\n organization = factories.OrganizationFactory()\n response = self.client.get(\n f\"/api/v1.0/admin/organizations/{organization.id}/accesses/\"\n )\n\n self.assertContains(\n response,\n \"You do not have permission to perform this action.\",\n status_code=403,\n )", "def test_get_global_template_as_anonymous_with_access_right_returns_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n template = template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.global_template)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"DELETE\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator", "def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator", "def test_should_render_user_with_can_change_status_and_unpublished(\n self,\n ) -> None:\n self.assertFalse(self.action.should_render(\n context=self._create_request_context(\n can_change_status=True,\n public=False,\n user=self.create_user())))", "def test_destroy_not_owner(self):\n\n self.assertEqual(first=1, second=Post.objects.all().count())\n url = reverse('post-detail', args=(self.post.id,))\n self.client.credentials(HTTP_AUTHORIZATION=self.token_1)\n response = self.client.delete(path=url)\n self.assertEqual(first=403, second=response.status_code)\n self.assertEqual(first=1, second=Post.objects.all().count())", "def test_requires_privilege_no_current_role(self):\n @requires_privilege(self.zazzle_privilege.slug, domain='zizzle')\n def view(request, *args, **kwargs):\n pass\n\n request = HttpRequest()\n with self.assertRaises(PermissionDenied):\n view(request)", "def check_is_admin(context):\n\n init()\n # the target is user-self\n target = default_target(context)\n return _ENFORCER.authorize('context_is_admin', target, context)", "def test_upsert_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def 
cog_check(self, ctx):\n return ctx.author.guild_permissions.administrator", "def glance_list_owned_public_images(glance, owner_id, image_info):\n\n images = []\n list_kwargs = {'filters': {'visibility': 'public', 'owner': owner_id}}\n public_owned_images = glance.images.list(**list_kwargs)\n for image in public_owned_images:\n # only images with the \"same\" name ('TOTO' matches 'test_TOTO' or 'TOTO - 2016-10-03')\n if image_info['image_name'] in image.name:\n images.append(image)\n return images", "def test_get_private(self):\n owner = create_user('owner')\n create_snippet('foo', private=True, owner=owner)\n expected = [0, 0, 1, 1]\n\n def check(i):\n response = self.get()\n self.assertEqual(len(response.data), expected[i])\n\n self.check_for_users(check, owner)", "def test_public_status_page_add_authorization_to_public_status_page(self):\n pass", "def test_private(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_public\": False, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"private\"})", "def test_checkread(self):\n user1 = {'uid': 1, 'gid': 1}\n self.assertTrue(self.m._checkread(user1, {}))\n mock_image = {\n 'userACL': None,\n 'groupACL': None\n }\n # Test a public image with ACLs set to None\n self.assertTrue(self.m._checkread(user1, mock_image))\n # Now empty list instead of None. Treat it the same way.\n mock_image['userACL'] = []\n mock_image['groupACL'] = []\n self.assertTrue(self.m._checkread(user1, mock_image))\n self.assertTrue(self.m._checkread(user1, {'private': False}))\n # Private false should trump other things\n self.assertTrue(self.m._checkread(user1,\n {'private': False, 'userACL': [2]}))\n self.assertTrue(self.m._checkread(user1,\n {'private': False, 'groupACL': [2]}))\n # Now check a protected image that the user should\n # have access to\n mock_image['userACL'] = [1]\n self.assertTrue(self.m._checkread(user1, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 2, 'gid': 1}, mock_image))\n # Now check by groupACL\n mock_image['groupACL'] = [1]\n self.assertTrue(self.m._checkread({'uid': 3, 'gid': 1}, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 3, 'gid': 2}, mock_image))\n # What about an image with a list\n mock_image = {\n 'userACL': [1, 2, 3],\n 'groupACL': [4, 5, 6]\n }\n self.assertTrue(self.m._checkread(user1, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 7, 'gid': 7}, mock_image))", "def test_show_container_privilege(self):\n pass", "def test_only_edit_perm(self):\n self.assertStatusCode(self.url, 403)", "def ft_syndicate_access():\n \n fake_user = FakeObject()\n fake_user.email = \"fakeuser@opencloud.us\"\n\n print \"\\nensure_user_exists(%s)\\n\" % fake_user.email\n ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )\n\n print \"\\nensure_user_exists(%s)\\n\" % fake_user.email\n ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )\n\n fake_volume = FakeObject()\n fake_volume.name = \"fakevolume\"\n fake_volume.description = \"This is a fake volume, created for funtional testing\"\n fake_volume.blocksize = 1024\n fake_volume.cap_read_data = True \n fake_volume.cap_write_data = True \n fake_volume.cap_host_data = False\n fake_volume.archive = False\n fake_volume.private = True\n \n # test idempotency\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n\n print \"\\nensure_volume_exists(%s)\\n\" % 
fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n \n print \"\\nensure_volume_access_right_exists(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )\n \n print \"\\nensure_volume_access_right_exists(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )\n \n print \"\\nensure_volume_access_right_absent(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_absent( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_access_right_absent(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_absent( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_user_absent(%s)\\n\" % fake_user.email\n ensure_user_absent( fake_user.email )\n\n print \"\\nensure_user_absent(%s)\\n\" % fake_user.email\n ensure_user_absent( fake_user.email )\n \n \n \n \n print \"\\nensure_principal_exists(%s)\\n\" % fake_user.email\n ensure_principal_exists( fake_user.email, \"asdf\", is_admin=False, max_UGs=1100, max_RGs=1 )\n \n print \"\\nensure_principal_exists(%s)\\n\" % fake_user.email\n ensure_principal_exists( fake_user.email, \"asdf\", is_admin=False, max_UGs=1100, max_RGs=1 )\n\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n\n print \"\\nsetup_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name)\n setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, \"abcdef\" )\n \n print \"\\nsetup_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name)\n setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, \"abcdef\" )\n \n print \"\\nteardown_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name )\n teardown_volume_access( fake_user.email, fake_volume.name )\n \n print \"\\nteardown_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name )\n teardown_volume_access( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_principal_absent(%s)\\n\" % fake_user.email\n ensure_principal_absent( fake_user.email )", "def test_un_logged_in_user_can_not_upload_picture(self):\n tmp_file = generate_image_for_testing()\n response = self.client.post(self.user_passport_url,\n data={'passport': tmp_file})\n\n self.assertEqual(403, response.status_code)", "def test_get_other_users_template_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )", "def test_delete_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user1_template, request=mock_request\n )" ]
[ "0.70528495", "0.6893526", "0.67592186", "0.6630321", "0.65318274", "0.65298545", "0.6517861", "0.65116936", "0.6454932", "0.6407351", "0.63348746", "0.63260263", "0.63152415", "0.6308235", "0.62680256", "0.62383586", "0.6053724", "0.59971654", "0.5979315", "0.59685737", "0.59361196", "0.5921458", "0.5909884", "0.590225", "0.590225", "0.5894046", "0.58879244", "0.58859384", "0.5884606", "0.58420056", "0.5840227", "0.58318365", "0.5815561", "0.57902616", "0.57894856", "0.577334", "0.574994", "0.5746908", "0.57375216", "0.57235", "0.57069063", "0.5696741", "0.56947607", "0.5688357", "0.5688357", "0.5683849", "0.56830215", "0.5679158", "0.5677948", "0.56750125", "0.56741583", "0.564221", "0.563737", "0.56302476", "0.56187975", "0.56115925", "0.5601187", "0.55990154", "0.5588827", "0.5584008", "0.5583446", "0.55823094", "0.5580021", "0.5578818", "0.55548805", "0.5552212", "0.5536423", "0.55349165", "0.5513506", "0.55024517", "0.5495582", "0.5485328", "0.54772", "0.5475156", "0.5461929", "0.54502314", "0.5447303", "0.54170346", "0.54121447", "0.54119253", "0.5409786", "0.5409018", "0.5409018", "0.5391568", "0.5388064", "0.5384749", "0.5381856", "0.5379029", "0.5373949", "0.53707606", "0.5369958", "0.5365546", "0.5363266", "0.5357535", "0.5350201", "0.5347934", "0.53452563", "0.5340629", "0.5336432", "0.53349555" ]
0.6911272
1
Tests that an empty context (with is_admin set to True) can not share an image, with or without membership.
def test_empty_shared(self): self.do_sharable(False, 'pattieblack', None, is_admin=True) self.do_sharable(False, 'pattieblack', FakeMembership(True), is_admin=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_un_logged_in_user_can_not_upload_picture(self):\n tmp_file = generate_image_for_testing()\n response = self.client.post(self.user_passport_url,\n data={'passport': tmp_file})\n\n self.assertEqual(403, response.status_code)", "def test_auth_sharable_cannot_share(self):\n self.do_sharable(False, 'pattieblack', FakeMembership(False),\n tenant='froggy')", "def test_user_without_share(self):\n set_permission(Permission.EDIT, self.user1, self.collection)\n\n # Can not add permissions to users.\n data = {\"users\": {self.user2.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n # Can not add permissions to groups.\n data = {\"users\": {self.group.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def every_non_existing_owner_should_not_have_profile_image_link(context):\n items = context.response.json()['items']\n for item in items:\n owner = item['owner']\n if not owner['user_type'] == 'does_not_exist':\n continue\n assert 'profile_image' not in owner\n logging.debug(\n 'Not existing Owner %s does not have a valid profile image link',\n owner['display_name'])", "def test_no_images_unauthorized(self):\n res = self.client.get(IMAGE_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_user_not_in_group_cannot_access(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url)", "def testMissingImage(self):\n self.assertNotIn('no_image', self.data)", "def test_logged_user_not_in_group_cannot_access(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n utils.test_cannot_access(self, self.url, expected_url)", "def test_user_not_in_group_cannot_update(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def cant_share_photo(request, ttl=None,*args, **kwargs):\n\tif ttl:\n\t\ttry:\n\t\t\tttl = int(ttl)\n\t\texcept ValueError:\n\t\t\tttl = None\n\tphoto_id = request.session.get(\"personal_group_shared_photo_id\",None)\n\torigin = request.session.get(\"personal_group_shared_photo_origin\",None)\n\tphoto_url = request.session.get(\"personal_group_shared_photo_url\",None)\n\tphoto_caption = request.session.get(\"personal_group_shared_photo_caption\",None)\n\tphoto_owner_username = request.session.get(\"personal_group_shared_photo_owner_username\",None)\n\treturn render(request,\"personal_group/sharing/photo_not_shared.html\",{'photo_caption':photo_caption,'photo_id':photo_id,'photo_url':photo_url,\\\n\t\t'photo_owner_username':photo_owner_username,'origin':origin,'ttl':ttl})", "def test_unauthenticated_resource_allowed(self):\n raise 
NotImplementedError # FIXME", "def test_cant_create_image_unauthorized(self):\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (10, 10))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n res = self.client.post(IMAGE_URL, data={'image': ntf},\n format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_no_permission(self):\n override_acl(self.user, {'can_use_private_threads': 0})\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"can't use private threads\", status_code=403)", "def test_no_permission(self):\n override_acl(self.user, {'can_use_private_threads': 0})\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"t use private threads\", status_code=403)", "def test_show_host_not_exist(self):\n self.req.environ['cinder.context'].is_admin = True\n dest = 'dummydest'\n self.assertRaises(webob.exc.HTTPNotFound,\n self.controller.show,\n self.req, dest)", "def test_logged_in_not_friend(self):\n\n # log in\n self.make_logged_in_not_friend()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PUBLIC)", "def ensure_share(self, context, share, share_server=None):\n pass", "def test_logged_in_friend_not_in_group(self):\n\n self.make_logged_in_friend_not_in_group()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_ALLFRIENDS)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_ALLFRIENDS)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_ALLFRIENDS)", "def test_detail_not_contributor_forbidden(self):\n self.login(self.user1)\n resp = self.client.get(self.get_url(self.c2.pk))\n self.assert403(resp)", "def test_locked_asset_not_registered(self):\r\n self.client.login(username=self.usr, password=self.pwd)\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def test_allow_build_not_in_allowlist(self):\n handler = MyHandler()\n container = {\"name\": \"test\", \"branch\": \"branch\"}\n\n allow = handler.allow_build(ArtifactType.IMAGE,\n name=container[\"name\"],\n branch=container[\"branch\"])\n assert not allow", "def test_no_permission(self):\n self.assertStatusCode(self.url, 
403)", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def ensure_share(self, context, share, share_server=None):\r\n LOG.debug(\"Ensure share.\")", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def not_authorized(context):\n context.config_file = 'NON_EXISTENT_FILE.cfg'", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_should_render_for_owner_unpublished(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context(\n public=False)))", "def test_user_not_in_group_cannot_delete(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url)\n self.assertEqual(len(Group.objects.all()), 1)", "def noaccess(self):\n self.assertEqual(self.client.get(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.post(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.delete(self.ENDPOINT).status_code, 403)", "def test_locked_asset_not_logged_in(self):\r\n self.client.logout()\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_mod_not_reported(self):\n override_acl(self.user, {'can_moderate_private_threads': 1})\n\n response = self.client.get(self.api_link)\n self.assertEqual(response.status_code, 404)", "def test_user_not_in_group_cannot_update_tab(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url, self.data)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"DELETE\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_user_does_not_have_access(self):\n self.assertRaises(\n ObjectDoesNotExist,\n Thread.public.get_by_user,\n **{'thread_id': self.thread.pk, 'user': self.user}\n )", "def test_group_is_not_private_user_is_not_member(self):\n thread = self.create_thread()\n user = self.create_user()\n self.assertTrue(thread.first_message.visible_to_user(user))", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_image_no_login(self):\n\n result = self.client.get(\"/select_image\", follow_redirects=True)\n\n 
self.assertIn(b\"Password\", result.data)", "def test_created_invalid_image(self):\n res = self.client.post(IMAGE_URL,\n data={'image': 'no_image'}, format='multipart')\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_not_logged_in(self):\n response = self.c.get(reverse(submit_point), {'lat': 34.0, 'lng': 45.3, 'zoom': 13})\n self.assertEqual(response.status_code, 403)", "def test_attachment_deletion_allowed_no_attachment(self):\n self.assertRaises(exception.ConflictNovaUsingAttachment,\n self.volume_api.attachment_deletion_allowed,\n self.context, None)", "def test_chooser_no_perms(self):\n root_page = Page.objects.get(pk=2)\n root_page.add_child(instance=NewsIndex(\n title='News', slug='news'))\n root_page.add_child(instance=SecondaryNewsIndex(\n title='Secondary News', slug='secondary-news'))\n\n response = self.client.get(reverse('wagtailnews:choose'))\n self.assertEqual(response.status_code, 403)", "def test_not_permitted(self, default_store):\n course = self.create_course_with_orphans(default_store)\n orphan_url = reverse_course_url('orphan_handler', course.id)\n\n test_user_client, test_user = self.create_non_staff_authed_user_client()\n CourseEnrollment.enroll(test_user, course.id)\n response = test_user_client.get(orphan_url)\n self.assertEqual(response.status_code, 403)\n response = test_user_client.delete(orphan_url)\n self.assertEqual(response.status_code, 403)", "def test_no_permission(client):\n user = user_with_permissions()\n\n url = reverse(\"admin:index\")\n client.force_login(user)\n\n response = client.get(url)\n assert parse_sidemenu(response) == {\"Global\": [\"/en/admin/\"]}", "def deny_access(self, context, share, access, share_server=None):\n self._get_helper(share).deny_access('/', share, access)", "def testGetAccessDenied(self):\n self.runGet(None)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def every_non_existing_owner_should_not_have_profile_link(context):\n items = context.response.json()['items']\n for item in items:\n owner = item['owner']\n if not owner['user_type'] == 'does_not_exist':\n continue\n assert 'link' not in owner\n logging.debug(\n 'Not existing Owner %s does not have a valid profile link',\n owner['display_name'])", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def _is_user_context(context):\n if not context:\n return False\n if context.is_admin:\n return False\n if not context.user_id or not context.project_id:\n return False\n return True", "def _is_user_context(context):\n if not context:\n return False\n if context.is_admin:\n return False\n if not context.user_id or not context.project_id:\n return False\n return True", "def test_not_logged_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_cannot_create_group(self):\n\n utils.test_not_logged_cannot_access(self, self.url, self.data)", "def is_user_context(context):\n if not context:\n return False\n if context.is_admin:\n return False\n if not context.user_id or not context.project_id:\n return False\n return True", "def is_user_context(context):\n if not context:\n return 
False\n if context.is_admin:\n return False\n if not context.user_id or not context.project_id:\n return False\n return True", "def is_user_context(context):\n if not context:\n return False\n if context.is_admin:\n return False\n if not context.user_id or not context.project_id:\n return False\n return True", "def do_sharable(self, exp_res, img_owner, membership=None, **kwargs):\n\n img = FakeImage(img_owner, True)\n ctx = context.RequestContext(**kwargs)\n\n sharable_args = {}\n if membership is not None:\n sharable_args['membership'] = membership\n\n self.assertEqual(ctx.is_image_sharable(img, **sharable_args), exp_res)", "def test_requestNonexistentAvatarId(self):\n username = '%s@%s' % (self.localpart, self.domain)\n d = self._requestAvatarId(\n UsernamePassword(username, self.password))\n return self.assertFailure(d, errors.NoSuchUser)", "def test_video_image_upload_disabled(self):\n video_image_upload_url = self.get_url_for_course_key(self.course.id, {'edx_video_id': 'test_vid_id'})\n response = self.client.post(video_image_upload_url, {'file': 'dummy_file'}, format='multipart')\n self.assertEqual(response.status_code, 404)", "def test_only_add_perm(self):\n self.assertStatusCode(self.url, 403)", "def test_theme_template_disallow_loading_admin_templates():\n app = create_ctfd()\n with app.app_context():\n try:\n # Make an empty malicious theme\n filename = os.path.join(\n app.root_path, \"themes\", \"foo_disallow\", \"admin\", \"malicious.html\"\n )\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n set_config(\"ctf_theme\", \"foo_disallow\")\n with open(filename, \"w\") as f:\n f.write(\"malicious\")\n\n with pytest.raises(TemplateNotFound):\n render_template_string(\"{% include 'admin/malicious.html' %}\")\n finally:\n # Remove empty theme\n shutil.rmtree(\n os.path.join(app.root_path, \"themes\", \"foo_disallow\"),\n ignore_errors=True,\n )", "def test_news_index_no_perm(self):\n self.assertStatusCode(self.url, 403)", "def test_upsert_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_not_creator_cannot_delete(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url)\n self.assertEqual(len(Group.objects.all()), 1)", "def test_attachment_deletion_allowed_no_instance(self, mock_get_server):\n attachment = self._get_attachment(with_instance_id=False)\n self.volume_api.attachment_deletion_allowed(self.context, attachment)\n mock_get_server.assert_not_called()", "def test_user_not_in_group_cannot_create_tab(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url)\n self.assertEqual(len(Tab.objects.all()), 0)", "def assert_response_resource_not_accessible(self, response):\n self.assertEqual(response.status_code, 403)\n self.assertEqual(\n response.json(),\n {\"detail\": \"You do not have permission to perform this action.\"},\n )", "def test_03_self_cannot_upgrade_resource(self):\n holes = self.holes\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.VIEW)\n self.assertFalse(dog in holes.raccess.edit_users)\n self.assertTrue(dog in 
holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_resource_with_user(\n holes, dog, PrivilegeCodes.VIEW)\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_resource_with_user(\n holes, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_no_perm_thread_delete(self):\n self.context['request'].user = User.objects.get(pk=118533)\n allowed = has_perm(self.context, 'forums_forum.thread_delete_forum',\n self.forum_1)\n eq_(allowed, False)\n allowed = has_perm(self.context, 'forums_forum.thread_delete_forum',\n self.forum_2)\n eq_(allowed, False)", "def testPostAccessDenied(self):\n self.runPost(None, data=self.post_data)\n self.response_401()\n for user in (self.guest, self.norole, self.unrelated_owner):\n self.runPost(user, data=self.post_data)\n self.response_403()", "def testNoPermission(self):\n self.login_user(self.user)\n response = self.client.get(self.url, self.args)\n self.assertEqual(response.status_code, 302)", "def media_image_remotely_accessible(self) -> bool:\n return True", "def test_delete_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_user_profile_picture_invalid_image_fails(self):\n image_upload_url = PROCEDURE_URL\n\n payload = {\n 'name': 'temp',\n 'speciality': [self.speciality.pk],\n 'image': 'invalid image',\n 'overview': 'bla bla bla'\n }\n\n res = self.client.post(\n image_upload_url,\n payload,\n format=\"multipart\"\n )\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_no_permissions(self):\n\n login(self.client)\n\n client = create_client('test')\n client.write_access = False\n client.save()\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': 3})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def test_anon_shared(self):\n self.do_sharable(False, 'pattieblack', None)\n self.do_sharable(False, 'pattieblack', FakeMembership(True))", "def test_get_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )", "def test_util_has_perm_or_owns_sanity(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n perm = 'forums_forum.thread_edit_forum'\n allowed = access.has_perm_or_owns(me, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = access.has_perm_or_owns(me, perm, other_t, self.forum_1)\n eq_(allowed, False)", "def test_thread_is_not_moderated(self):\n self.assertRaises(\n ObjectDoesNotExist,\n Thread.public.get_by_user,\n **{'thread_id': self.thread.pk, 'user': 
self.user}\n )\n self.group.private = False\n self.group.save()\n self.assertEqual(\n Thread.public.get_by_user(\n thread_id=self.thread.pk, user=self.user),\n self.thread\n )", "def test_unshare_template_registration(self):\n pass", "def test_get_other_users_template_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )", "def test_creator_group_not_enabled(self):\r\n self.assertTrue(has_access(self.user, CourseCreatorRole()))", "def assert_cannot_view(obj):\n selenium_utils.open_url(obj.url)\n assert ui_utils.is_error_403()", "def test_only_edit_perm(self):\n self.assertStatusCode(self.url, 403)", "def is_admin_context(context):\n if not context:\n warnings.warn(_('Use of empty request context is deprecated'),\n DeprecationWarning)\n raise Exception('die')\n return context.is_admin", "def is_admin_context(context):\n if not context:\n warnings.warn(_('Use of empty request context is deprecated'),\n DeprecationWarning)\n raise Exception('die')\n return context.is_admin", "def is_admin_context(context):\n if not context:\n warnings.warn(_('Use of empty request context is deprecated'),\n DeprecationWarning)\n raise Exception('die')\n return context.is_admin", "def test_no_permission(self):\n self.user.user_permissions.remove(*self.permissions)\n response = self._get()\n self.assertEqual(response.status_code, 302)", "def test_no_permission(self):\n self.user.user_permissions.remove(*self.permissions)\n response = self._get()\n self.assertEqual(response.status_code, 302)" ]
[ "0.6628714", "0.65942466", "0.6389793", "0.63213843", "0.6221423", "0.6164767", "0.6150678", "0.6041313", "0.6000155", "0.5972449", "0.597103", "0.5943589", "0.59278715", "0.59188557", "0.59188557", "0.5916836", "0.59154505", "0.5850877", "0.58333635", "0.5827698", "0.58013195", "0.578988", "0.57846916", "0.5781874", "0.5775917", "0.5775917", "0.5775917", "0.5775917", "0.5774469", "0.5759755", "0.57568246", "0.5752683", "0.57326007", "0.57232934", "0.57186633", "0.57051986", "0.5652952", "0.5646443", "0.56329817", "0.5622762", "0.5620752", "0.5620309", "0.5619624", "0.5619462", "0.56009203", "0.558251", "0.5576507", "0.55679023", "0.55673546", "0.55602574", "0.5555545", "0.55507576", "0.55482906", "0.5547911", "0.5539856", "0.5539856", "0.5539856", "0.5539856", "0.5531596", "0.5531596", "0.5517904", "0.5512854", "0.5512526", "0.5512526", "0.5512526", "0.55070454", "0.55068713", "0.5503925", "0.5503075", "0.5499324", "0.5481691", "0.54813546", "0.5477823", "0.5476448", "0.5472748", "0.5472545", "0.5471226", "0.5468453", "0.5468453", "0.5465065", "0.54636985", "0.5457848", "0.54543215", "0.54537773", "0.5453466", "0.5453292", "0.544642", "0.5444299", "0.5434446", "0.5429703", "0.5426636", "0.5426147", "0.54231197", "0.54129374", "0.5408179", "0.5387149", "0.5387149", "0.5387149", "0.5381913", "0.5381913" ]
0.60693204
7
Tests that an anonymous context (with is_admin set to False) can access an image with is_public set to True.
def test_anon_public(self): self.do_visible(True, None, True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_visible(self, exp_res, img_owner, img_public, **kwargs):\n\n img = FakeImage(img_owner, img_public)\n ctx = context.RequestContext(**kwargs)\n\n self.assertEqual(ctx.is_image_visible(img), exp_res)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_aws_service_api_private_images_get(self):\n pass", "def test_aws_service_api_private_image_get(self):\n pass", "def test_anonymous_public(self):\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def test_admin_public(self):\n with self.login(self.user_admin):\n self.assertTrue(current_user.is_authenticated)\n self.assertEqual(current_user, self.user_admin)\n\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def test_auth_public(self):\n self.do_visible(True, None, True, tenant='froggy')", "def test_aws_service_api_public_image_get(self):\n pass", "def test_auth_private(self):\n self.do_visible(True, None, False, tenant='froggy')", "def test_api_thumbnail_retrieve_by_organization_administrator(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=ADMINISTRATOR,\n )\n\n self.assert_user_can_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_auth_public_owned(self):\n self.do_visible(True, 'pattieblack', True, tenant='pattieblack')", "def test_aws_service_api_public_images_get(self):\n pass", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def media_image_remotely_accessible(self) -> bool:\n return True", "def test_public_user(self):\n set_permission(Permission.SHARE, self.user1, self.collection)\n\n data = {\"public\": \"view\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"none\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"edit\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"share\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"owner\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def test_no_images_unauthorized(self):\n res = self.client.get(IMAGE_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_auth_private_owned(self):\n self.do_visible(True, 'pattieblack', False, tenant='pattieblack')", "def test_logged_in_owner(self):\n self.make_logged_in_owner()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u, album.display_photo, ALBUM_PRIVATE)", 
"def test_04_admin_featured_apps_as_anonymous(self):\r\n res = self.app.get('/admin/featured', follow_redirects=True)\r\n assert \"Please sign in to access this page\" in res.data, res.data", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def test_anon_public_owned(self):\n self.do_visible(True, 'pattieblack', True)", "def test_empty_public(self):\n self.do_visible(True, None, True, is_admin=True)", "def test_filter_public_permissions(self):\n data = {\"public\": \"view\"}\n check_public_permissions(data)\n\n data = {\"public\": \"edit\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"share\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"owner\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)", "def test_get_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_permissions(self):\n taxonomy = self.get_taxonomy()\n return True if self.request.user == taxonomy.author else taxonomy.public", "def test_get_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_image_no_login(self):\n\n result = self.client.get(\"/select_image\", follow_redirects=True)\n\n self.assertIn(b\"Password\", result.data)", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.global_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in templates))", "def test_get_global_template_as_anonymous_with_access_right_returns_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n template = template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.global_template)", "def test_empty_public_owned(self):\n self.do_visible(True, 'pattieblack', True, is_admin=True)", "def test_public_status_page_add_authorization_to_public_status_page(self):\n pass", "def test_anon_private(self):\n self.do_visible(True, None, False)", "def test_get_owner_image(self):\n\n # user1 is owner of image_id 1\n # user2 is owner of image ids (2,3)\n for image_id in range(1, 4):\n url = reverse(self.url_name_one, args=(image_id,))\n if image_id == 1:\n 
self.client.force_authenticate(self.user1)\n else:\n self.client.force_authenticate(self.user2)\n\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n if image_id == 1:\n self.assertEqual(response.data[\"owner\"], \"user1\")\n else:\n self.assertEqual(response.data[\"owner\"], \"user2\")\n\n # user2 try to get image_id 1 which is owner user1\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_image_display(self):\n\n result = self.client.get(\"/select_image\")\n\n self.assertIn(b\"/static/uploads/girl-glowing-skin-blue-eyes.jpg\", result.data)", "def test_api_thumbnail_retrieve_by_organization_student(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=STUDENT,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_api_thumbnail_administrator_read_detail_in_read_only(self):\n thumbnail = ThumbnailFactory()\n\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=thumbnail.video.playlist,\n permissions__can_update=False,\n )\n\n response = self.client.get(\n self._get_url(thumbnail.video, thumbnail),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)", "def test_an_admin_view_anonymous(client):\n response = client.get('/admin/')\n assert status(response) == 'found'\n assert response.url.startswith('/admin/login/')", "def test_api_thumbnail_read_detail_anonymous(self):\n video = VideoFactory()\n thumbnail = ThumbnailFactory(video=video)\n response = self.client.get(self._get_url(video, thumbnail))\n self.assertEqual(response.status_code, 401)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"Authentication credentials were not provided.\"}\n )", "def guest_access(func):\n def decorated(*_, **kwargs):\n public_profiles = current_app.config['USER_PUBLIC_PROFILES']\n if not public_profiles:\n if not current_user.is_authenticated:\n abort(401)\n elif current_user.id != kwargs['id']:\n abort(403)\n return func(**kwargs)\n\n return decorated", "def test_api_thumbnail_retrieve_by_organization_instructor(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=INSTRUCTOR,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_get_user_template_as_anonymous_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_empty_private(self):\n self.do_visible(True, None, False, is_admin=True)", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def test_authenticated_inherits_anonymous_permission(self):\n resource = Resource('milestone', 'milestone1')\n self.assertTrue(self.check_permission('MILESTONE_VIEW',\n 'anonymous', resource))\n 
self.assertTrue(self.check_permission('MILESTONE_VIEW',\n 'authenticated', resource))\n self.assertIn('MILESTONE_VIEW', self.get_perm('anonymous',\n resource))\n self.assertIn('MILESTONE_VIEW', self.get_perm('authenticated',\n resource))", "def test_get_all_accessible_by_id_list_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_id_list(\n self.template_id_list, request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in templates))", "def test_public(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_public\": True, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"public\"})", "def test_document_not_publicly_accessible(self):\n document = DocumentFactory(\n id=\"301b5f4f-b9f1-4a5f-897d-f8f1bf22c396\",\n playlist__title=\"playlist-003\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n is_public=False,\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n uploaded_on=\"2019-09-24 07:24:40+00\",\n )\n\n response = self.client.get(f\"/documents/{document.pk}\")\n\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"<html>\")\n content = response.content.decode(\"utf-8\")\n\n match = re.search(\n '<div id=\"marsha-frontend-data\" data-context=\"(.*)\">', content\n )\n\n context = json.loads(unescape(match.group(1)))\n\n self.assertIsNone(context.get(\"resource\"))\n self.assertEqual(context.get(\"state\"), \"error\")\n self.assertEqual(context.get(\"modelName\"), \"documents\")", "def test_profile_image_requested_field_anonymous_user(self):\n source_threads = [\n self.create_source_thread(\n {\"user_id\": None, \"username\": None, \"anonymous\": True, \"anonymous_to_peers\": True}\n ),\n ]\n\n self.register_get_user_response(self.user, upvoted_ids=[\"test_thread\"])\n self.register_get_threads_response(source_threads, page=1, num_pages=1)\n\n response = self.client.get(\n self.url,\n {\"course_id\": str(self.course.id), \"requested_fields\": \"profile_image\"},\n )\n assert response.status_code == 200\n response_thread = json.loads(response.content.decode('utf-8'))['results'][0]\n assert response_thread['author'] is None\n assert {} == response_thread['users']", "def test_admin_accessible(self) -> None:\n response = self.client.get(\"/admin/\")\n self.assertEqual(200, response.status_code)", "def test_anon_private_owned(self):\n self.do_visible(False, 'pattieblack', False)", "def test_logged_in_not_friend(self):\n\n # log in\n self.make_logged_in_not_friend()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PUBLIC)", "def test_photo_classification_view_set_get_public(self):\n # Test data\n user = account_models.User.objects.get(email='mrtest@mypapaya.io')\n\n photo_models.PhotoClassification.objects.create_or_update(name='City', public=False)\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n 
client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n request = client.get('/api/photo_classifications')\n results = request.data['results']\n\n self.assertEquals(len(results), 13)", "def test_private(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_public\": False, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"private\"})", "def testImageHandling(self):\n \n pm = getToolByName(self.portal, 'portal_membership')\n #make sure the person's member portrait isn't defined\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n # Delete the (nonexistant) image, make sure the portrait stays undefined\n self.person.setImage('DELETE_IMAGE')\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n self.person.setImage(TEST_GIF, content_type=\"image/gif\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_GIF)\n # Try to get a 10x10 version of the image\n imageOfSizeTag = self.person.getImageOfSize(10, 10)\n self.failUnlessEqual(imageOfSizeTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"10\" width=\"10\" />')\n self.failUnlessEqual(pm.getPersonalPortrait('abc123').__name__, 'abc123')\n \n # Try to get a scaled-by-ratio image with a width of 100.\n scaledImageTag = self.person.getScaledImageByWidth(100)\n self.failUnlessEqual(scaledImageTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"150\" width=\"100\" />')\n \n # Delete the image, make sure the portrait is deleted as well\n self.person.setImage('DELETE_IMAGE')\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n #self.person.setImage(TEST_JPEG, content_type=\"image/jpeg\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_JPEG)\n \n self.person.setImage(TEST_TIFF, content_type=\"image/tiff\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_TIFF)\n # Try to get a 10x10 version of the image\n imageOfSizeTag = self.person.getImageOfSize(10, 10)\n self.failUnlessEqual(imageOfSizeTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"10\" width=\"10\" />')\n \n # Try to get a scaled-by-ratio image with a width of 100.\n # TIFF handling in Plone is broken (probably the fault of PIL), handle the problem nicely.\n scaledImageTag = self.person.getScaledImageByWidth(100)\n self.failUnless(scaledImageTag == '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"150\" width=\"100\" />' or scaledImageTag == '')", "def test_whoami_by_anonymous_user(self):\n response = self.client.get(\"/api/users/whoami/\")\n self.assertEqual(response.status_code, 401)", "def test_api_thumbnail_instructor_read_detail_in_read_only(self):\n thumbnail = ThumbnailFactory()\n\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=thumbnail.video.playlist,\n permissions__can_update=False,\n )\n\n response = self.client.get(\n self._get_url(thumbnail.video, thumbnail),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)", "def get_public_images_for_user(username):\n\n user = get_user_by_username(username)\n images = user.images\n public_images = [img for img in images if 
img.permission.value == \"PUBLIC\"]\n return public_images", "def test_empty_private_owned(self):\n self.do_visible(True, 'pattieblack', False, is_admin=True)", "def test_has_role_public(self):\n self.project.set_public()\n self.assertFalse(self.category.has_role(self.user_bob))\n self.assertTrue(self.project.has_role(self.user_bob))", "def test_auth_required(self, api_client):\n res = api_client.get(PHOTO_URL)\n\n assert res.status_code == status.HTTP_401_UNAUTHORIZED", "def test_get_public_guest_access(self):\n self.project.public_guest_access = True\n self.project.save()\n user_new = self.make_user('user_new')\n url = reverse('projectroles:api_project_list')\n response = self.request_knox(url, token=self.get_token(user_new))\n self.assertEqual(response.status_code, 200)\n response_data = json.loads(response.content)\n self.assertEqual(len(response_data), 1)\n self.assertEqual(\n response_data[0]['sodar_uuid'], str(self.project.sodar_uuid)\n )", "def do_sharable(self, exp_res, img_owner, membership=None, **kwargs):\n\n img = FakeImage(img_owner, True)\n ctx = context.RequestContext(**kwargs)\n\n sharable_args = {}\n if membership is not None:\n sharable_args['membership'] = membership\n\n self.assertEqual(ctx.is_image_sharable(img, **sharable_args), exp_res)", "def test_api_thumbnail_retrieve_by_playlist_admin(self):\n playlist_access = PlaylistAccessFactory(\n playlist=self.some_video.playlist,\n role=ADMINISTRATOR,\n )\n\n self.assert_user_can_retrieve_thumbnail(\n playlist_access.user, self.some_thumbnail\n )", "def test_get_all_accessible_by_hash_list_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.global_template.hash], request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in templates))", "def test_public_video_detail_anonymous(\n settings, logged_in_apiclient, user_admin_list_data\n):\n client, _ = logged_in_apiclient\n client.logout()\n user_admin_list_data.video.is_public = True\n user_admin_list_data.video.save()\n url = reverse(\n \"video-detail\", kwargs={\"video_key\": user_admin_list_data.video.hexkey}\n )\n response = client.get(url, follow=True)\n assert response.status_code == 200", "def test_logged_in_contributor(self):\n self.make_logged_in_contributor()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PRIVATE)", "def test_cant_create_image_unauthorized(self):\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (10, 10))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n res = self.client.post(IMAGE_URL, data={'image': ntf},\n format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_un_logged_in_user_can_not_upload_picture(self):\n tmp_file = generate_image_for_testing()\n response = self.client.post(self.user_passport_url,\n data={'passport': tmp_file})\n\n self.assertEqual(403, response.status_code)", "def 
test_get_all_accessible_by_hash_as_anonymous_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_user_visibility(app, resource):\n nethz = 'Something'\n with app.user(nethz=nethz):\n # Create fake signup with different nethz\n own = str(app.data.driver.db[resource].insert({'nethz': nethz}))\n other = str(app.data.driver.db[resource].insert({'nethz': 'trolo'}))\n\n # Resource: Can only see own, not both signups\n response = app.client.get('/' + resource, assert_status=200)\n assert len(response['_items']) == 1\n assert response['_items'][0]['nethz'] == nethz\n\n # Items\n own_url = '/%s/%s' % (resource, own)\n other_url = '/%s/%s' % (resource, other)\n\n # Get\n app.client.get(own_url, assert_status=200)\n app.client.get(other_url, assert_status=404)\n\n # Patch (if we can see item, we get 428 since etag is missing)\n app.client.patch(own_url, data={}, assert_status=428)\n app.client.patch(other_url, data={}, assert_status=404)\n\n # Delete (etag missing again)\n app.client.delete(own_url, assert_status=428)\n app.client.delete(other_url, assert_status=404)", "def test_public_status_page_remove_authorization_from_public_status_page(self):\n pass", "def test_access_is_password_protected(self):\n\n # Directly create an Upload object in the DB.\n upload = create_url_upload(\"https://google.com/robots.txt\")\n response = self.client.get(reverse(\"upload-access\", args=[upload.id]))\n\n # Ensure request does not error, no special code expected here.\n self.assertEqual(response.status_code, 200)\n\n self.assertContains(response, \"Password\")", "def test_logged_user_can_access(self):\n\n utils.create_user_and_authenticate(self)\n utils.test_can_access(self, self.url)", "def test_api_thumbnail_retrieve_by_random_user(self):\n user = UserFactory()\n\n self.assert_user_cannot_retrieve_thumbnail(user, self.some_thumbnail)", "def test_api_thumbnail_retrieve_by_consumer_site_any_role(self):\n consumer_site_access = ConsumerSiteAccessFactory(\n consumer_site=self.some_video.playlist.consumer_site,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n consumer_site_access.user, self.some_thumbnail\n )", "def glance_update_and_set_public(glance, image, image_info):\n image_properties = image_info['image_properties']\n try:\n logger.debug(\"glance image update: properties=%s\", image_properties)\n glance.images.update(image.id, **image_properties)\n logger.debug(\"glance image update: visibility=public\")\n glance.images.update(image.id, visibility='public')\n except Exception:\n logger.exception(\"Updating (-> public) Glance image '%s' [%s] failed\", image.name, image.id)\n return 1\n\n return 0", "def test_admin_api_organization_accesses_request_authenticated(self):\n user = factories.UserFactory(is_staff=False, is_superuser=False)\n self.client.login(username=user.username, password=\"password\")\n organization = factories.OrganizationFactory()\n response = self.client.get(\n f\"/api/v1.0/admin/organizations/{organization.id}/accesses/\"\n )\n\n self.assertContains(\n response,\n \"You do not have permission to perform this action.\",\n status_code=403,\n )", "def test_anonymous(self):\n self.logout_user()\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"sign in to use private threads\", status_code=403)", "def is_public(self):\n return 
self.document.is_public", "def test_get_all_as_anonymous_with_access_right_returns_global_templates(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all(request=mock_request)\n self.assertEqual(templates.count(), 1)\n self.assertTrue((template.user is None for template in templates))", "def testAnonymousCannotEdit(self):\n response = self.client.get(reverse(\"task_detail\", args=[1]))\n self.failUnlessEqual(response.status_code, 200)\n self.failUnless(response.content.find(\"<h2>Edit</h2>\") == -1,\n \"Anonymous user is able to edit tasks.\")", "def test_admin_view_access(request_ctx):\n user = User.get(email=\"root@test0.edu\")\n with request_ctx(\"/org_invitatin_summary\") as ctx:\n login_user(user, remember=True)\n rv = ctx.app.full_dispatch_request()\n assert rv.status_code == 200\n assert b\"<!DOCTYPE html>\" in rv.data, \"Expected HTML content\"\n assert b\"Organisation Invitation Summary\" in rv.data\n assert b\"root@test0.edu\" in rv.data", "def test_anything_else_is_accessible(api_client):\n\n assert api_client().get(\"/anything/else\").status_code == 200", "def test_url_is_accessible_to_externals(self):\n\n res = self.client.get(self.url)\n self.assertEqual(res.status_code, 200)\n\n self.user.is_external = False\n self.user.save()\n res = self.client.get(self.url)\n self.assertEqual(res.status_code, 404)", "def is_private():", "def test_api_thumbnail_read_detail_admin_user(self):\n video = VideoFactory(\n uploaded_on=datetime(2018, 8, 8, tzinfo=timezone.utc), upload_state=\"ready\"\n )\n thumbnail = ThumbnailFactory(video=video, upload_state=\"pending\")\n\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=video.playlist,\n roles=[\"administrator\"],\n )\n\n response = self.client.get(\n self._get_url(video, thumbnail),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content)\n\n self.assertEqual(\n content,\n {\n \"id\": str(thumbnail.id),\n \"active_stamp\": None,\n \"is_ready_to_show\": False,\n \"upload_state\": \"pending\",\n \"urls\": None,\n \"video\": str(video.id),\n },\n )", "def test_get_all_accessible_by_hash_as_anonymous_does_not_return_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.global_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def public_resource():\n return create_response(\n status_value=True,\n code=200,\n message=\"You have access the public resource\"\n )", "def test_public(client, url):\n response = client.get(url, secure=True)\n assert response.status_code == 200", "def test_document_publicly_accessible(self):\n document = DocumentFactory(\n id=\"301b5f4f-b9f1-4a5f-897d-f8f1bf22c396\",\n playlist__title=\"playlist-003\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n is_public=True,\n title=\"document-001\",\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n uploaded_on=\"2019-09-24 07:24:40+00\",\n )\n\n response = self.client.get(f\"/documents/{document.pk}\")\n\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"<html>\")\n content = response.content.decode(\"utf-8\")\n\n match = re.search(\n '<div id=\"marsha-frontend-data\" data-context=\"(.*)\">', content\n )\n\n context = json.loads(unescape(match.group(1)))\n jwt_token = AccessToken(context.get(\"jwt\"))\n\n self.assertEqual(\n 
jwt_token.payload[\"permissions\"],\n {\"can_access_dashboard\": False, \"can_update\": False},\n )\n self.assertEqual(context.get(\"state\"), \"success\")\n self.assertEqual(\n context.get(\"resource\"),\n {\n \"active_stamp\": \"1569309880\",\n \"is_ready_to_show\": True,\n \"show_download\": True,\n \"id\": str(document.id),\n \"upload_state\": document.upload_state,\n \"title\": document.title,\n \"extension\": None,\n \"filename\": \"playlist-003_document-001\",\n \"playlist\": {\n \"id\": str(document.playlist.id),\n \"title\": \"playlist-003\",\n \"lti_id\": \"course-v1:ufr+mathematics+00001\",\n },\n \"url\": (\n \"https://abc.cloudfront.net/301b5f4f-b9f1-4a5f-897d-f8f1bf22c396\"\n \"/document/1569309880?response-content-disposition=attachment%3B\"\n \"+filename%3Dplaylist-003_document-001\"\n ),\n },\n )\n self.assertEqual(context.get(\"modelName\"), \"documents\")\n self.assertIsNone(context.get(\"context_id\"))", "def test_models_organization_get_abilities_anonymous(self):\n organization = factories.OrganizationFactory()\n abilities = organization.get_abilities(AnonymousUser())\n\n self.assertEqual(\n abilities,\n {\n \"delete\": False,\n \"get\": True,\n \"patch\": False,\n \"put\": False,\n \"manage_accesses\": False,\n },\n )", "def allowed_to_preview(user):\n if (\n user.is_authenticated and\n user.is_active and\n user.is_staff\n ):\n return True\n return False", "def is_anonymous_access_allowed(self):\n return self._is_anonymous_access_allowed", "def test_allowed_if_superuser(self):\n\n @task_or_superuser_only\n def view(request):\n return HttpResponse(\"Hello\")\n\n class User(object):\n is_superuser = True\n is_authenticated = True\n\n request = self.factory.get(\"/\")\n request.user = None\n response = view(request)\n self.assertEqual(response.status_code, 403)\n\n request.user = User()\n response = view(request)\n self.assertEqual(response.status_code, 200)", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_default_publish(self):\n self.assertIs(self.photo.published, 'Public')" ]
[ "0.7238873", "0.6944182", "0.6944182", "0.6802116", "0.6793539", "0.6687952", "0.6679281", "0.667282", "0.65554184", "0.65371454", "0.6498809", "0.6494417", "0.6419172", "0.6416152", "0.62828475", "0.6213219", "0.61874706", "0.61568916", "0.6070943", "0.6064813", "0.6054403", "0.60503423", "0.60481566", "0.6037368", "0.60318875", "0.600708", "0.59996974", "0.5997387", "0.5995558", "0.59891963", "0.5964184", "0.59595317", "0.5953296", "0.59514636", "0.59499073", "0.5931501", "0.5920939", "0.58967364", "0.589072", "0.5887656", "0.5863653", "0.5860015", "0.5858565", "0.5856011", "0.58267677", "0.58137536", "0.57895815", "0.57696694", "0.5765875", "0.57548654", "0.57352334", "0.5727324", "0.5723782", "0.571889", "0.5710287", "0.5708332", "0.5700436", "0.5688485", "0.5669508", "0.5653898", "0.56483114", "0.5641009", "0.56344813", "0.56158596", "0.5612549", "0.56070405", "0.5599608", "0.55964535", "0.5583538", "0.557831", "0.55762076", "0.55709255", "0.55709064", "0.55495507", "0.55357635", "0.55262804", "0.5520572", "0.55184793", "0.5518009", "0.5517249", "0.5513267", "0.5513029", "0.551286", "0.55119824", "0.550731", "0.5505061", "0.5499325", "0.5497596", "0.5494552", "0.5492552", "0.54906315", "0.54877514", "0.54738814", "0.5473668", "0.5471383", "0.54711056", "0.5467526", "0.54627335", "0.54627335", "0.54521394" ]
0.60884994
18
Tests that an anonymous context (with is_admin set to False) can access an owned image with is_public set to True.
def test_anon_public_owned(self):
    self.do_visible(True, 'pattieblack', True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_auth_public_owned(self):\n self.do_visible(True, 'pattieblack', True, tenant='pattieblack')", "def do_visible(self, exp_res, img_owner, img_public, **kwargs):\n\n img = FakeImage(img_owner, img_public)\n ctx = context.RequestContext(**kwargs)\n\n self.assertEqual(ctx.is_image_visible(img), exp_res)", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_empty_public_owned(self):\n self.do_visible(True, 'pattieblack', True, is_admin=True)", "def test_logged_in_owner(self):\n self.make_logged_in_owner()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u, album.display_photo, ALBUM_PRIVATE)", "def test_auth_private_owned(self):\n self.do_visible(True, 'pattieblack', False, tenant='pattieblack')", "def test_anon_private_owned(self):\n self.do_visible(False, 'pattieblack', False)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_aws_service_api_private_image_get(self):\n pass", "def test_get_owner_image(self):\n\n # user1 is owner of image_id 1\n # user2 is owner of image ids (2,3)\n for image_id in range(1, 4):\n url = reverse(self.url_name_one, args=(image_id,))\n if image_id == 1:\n self.client.force_authenticate(self.user1)\n else:\n self.client.force_authenticate(self.user2)\n\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n if image_id == 1:\n self.assertEqual(response.data[\"owner\"], \"user1\")\n else:\n self.assertEqual(response.data[\"owner\"], \"user2\")\n\n # user2 try to get image_id 1 which is owner user1\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_empty_private_owned(self):\n self.do_visible(True, 'pattieblack', False, is_admin=True)", "def test_aws_service_api_private_images_get(self):\n pass", "def test_auth_public(self):\n self.do_visible(True, None, True, tenant='froggy')", "def test_public_user(self):\n set_permission(Permission.SHARE, self.user1, self.collection)\n\n data = {\"public\": \"view\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"none\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"edit\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"share\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"owner\"}\n resp 
= self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def test_auth_private(self):\n self.do_visible(True, None, False, tenant='froggy')", "def test_anonymous_public(self):\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def test_api_thumbnail_retrieve_by_organization_administrator(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=ADMINISTRATOR,\n )\n\n self.assert_user_can_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_anon_public(self):\n self.do_visible(True, None, True)", "def test_admin_public(self):\n with self.login(self.user_admin):\n self.assertTrue(current_user.is_authenticated)\n self.assertEqual(current_user, self.user_admin)\n\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def test_get_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_aws_service_api_public_image_get(self):\n pass", "def test_permissions(self):\n taxonomy = self.get_taxonomy()\n return True if self.request.user == taxonomy.author else taxonomy.public", "def test_empty_public(self):\n self.do_visible(True, None, True, is_admin=True)", "def test_anon_private(self):\n self.do_visible(True, None, False)", "def test_empty_private(self):\n self.do_visible(True, None, False, is_admin=True)", "def media_image_remotely_accessible(self) -> bool:\n return True", "def test_aws_service_api_public_images_get(self):\n pass", "def test_should_render_for_owner_unpublished(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context(\n public=False)))", "def test_get_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )", "def test_get_user_template_as_anonymous_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.global_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in templates))", "def test_filter_public_permissions(self):\n data = {\"public\": \"view\"}\n 
check_public_permissions(data)\n\n data = {\"public\": \"edit\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"share\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"owner\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)", "def test_user_visibility(app, resource):\n nethz = 'Something'\n with app.user(nethz=nethz):\n # Create fake signup with different nethz\n own = str(app.data.driver.db[resource].insert({'nethz': nethz}))\n other = str(app.data.driver.db[resource].insert({'nethz': 'trolo'}))\n\n # Resource: Can only see own, not both signups\n response = app.client.get('/' + resource, assert_status=200)\n assert len(response['_items']) == 1\n assert response['_items'][0]['nethz'] == nethz\n\n # Items\n own_url = '/%s/%s' % (resource, own)\n other_url = '/%s/%s' % (resource, other)\n\n # Get\n app.client.get(own_url, assert_status=200)\n app.client.get(other_url, assert_status=404)\n\n # Patch (if we can see item, we get 428 since etag is missing)\n app.client.patch(own_url, data={}, assert_status=428)\n app.client.patch(other_url, data={}, assert_status=404)\n\n # Delete (etag missing again)\n app.client.delete(own_url, assert_status=428)\n app.client.delete(other_url, assert_status=404)", "def test_admin_api_organization_accesses_request_authenticated(self):\n user = factories.UserFactory(is_staff=False, is_superuser=False)\n self.client.login(username=user.username, password=\"password\")\n organization = factories.OrganizationFactory()\n response = self.client.get(\n f\"/api/v1.0/admin/organizations/{organization.id}/accesses/\"\n )\n\n self.assertContains(\n response,\n \"You do not have permission to perform this action.\",\n status_code=403,\n )", "def test_get_global_template_as_anonymous_with_access_right_returns_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n template = template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.global_template)", "def test_authenticated_inherits_anonymous_permission(self):\n resource = Resource('milestone', 'milestone1')\n self.assertTrue(self.check_permission('MILESTONE_VIEW',\n 'anonymous', resource))\n self.assertTrue(self.check_permission('MILESTONE_VIEW',\n 'authenticated', resource))\n self.assertIn('MILESTONE_VIEW', self.get_perm('anonymous',\n resource))\n self.assertIn('MILESTONE_VIEW', self.get_perm('authenticated',\n resource))", "def test_logged_in_not_friend(self):\n\n # log in\n self.make_logged_in_not_friend()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PUBLIC)", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", 
"def do_sharable(self, exp_res, img_owner, membership=None, **kwargs):\n\n img = FakeImage(img_owner, True)\n ctx = context.RequestContext(**kwargs)\n\n sharable_args = {}\n if membership is not None:\n sharable_args['membership'] = membership\n\n self.assertEqual(ctx.is_image_sharable(img, **sharable_args), exp_res)", "def test_get_all_accessible_by_id_list_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_id_list(\n self.template_id_list, request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in templates))", "def test_public_status_page_add_authorization_to_public_status_page(self):\n pass", "def test_models_organization_get_abilities_owner(self):\n access = factories.UserOrganizationAccessFactory(role=\"owner\")\n abilities = access.organization.get_abilities(access.user)\n self.assertEqual(\n abilities,\n {\n \"delete\": True,\n \"get\": True,\n \"patch\": True,\n \"put\": True,\n \"manage_accesses\": True,\n },\n )", "def test_whoami_by_anonymous_user(self):\n response = self.client.get(\"/api/users/whoami/\")\n self.assertEqual(response.status_code, 401)", "def test_logged_in_contributor(self):\n self.make_logged_in_contributor()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PRIVATE)", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def test_api_thumbnail_retrieve_by_organization_instructor(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=INSTRUCTOR,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_no_images_unauthorized(self):\n res = self.client.get(IMAGE_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def guest_access(func):\n def decorated(*_, **kwargs):\n public_profiles = current_app.config['USER_PUBLIC_PROFILES']\n if not public_profiles:\n if not current_user.is_authenticated:\n abort(401)\n elif current_user.id != kwargs['id']:\n abort(403)\n return func(**kwargs)\n\n return decorated", "def test_has_role_public(self):\n self.project.set_public()\n self.assertFalse(self.category.has_role(self.user_bob))\n self.assertTrue(self.project.has_role(self.user_bob))", "def test_is_owner_inherited_and_local(self):\n self.make_assignment(self.project, self.user_alice, self.role_owner)\n self.assertTrue(self.project.is_owner(self.user_alice))", "def test_models_organization_get_abilities_anonymous(self):\n organization = factories.OrganizationFactory()\n abilities = organization.get_abilities(AnonymousUser())\n\n self.assertEqual(\n abilities,\n {\n \"delete\": False,\n \"get\": True,\n \"patch\": False,\n \"put\": False,\n \"manage_accesses\": False,\n },\n )", "def test_get_public_guest_access(self):\n self.project.public_guest_access = True\n self.project.save()\n user_new = self.make_user('user_new')\n url = reverse('projectroles:api_project_list')\n 
response = self.request_knox(url, token=self.get_token(user_new))\n self.assertEqual(response.status_code, 200)\n response_data = json.loads(response.content)\n self.assertEqual(len(response_data), 1)\n self.assertEqual(\n response_data[0]['sodar_uuid'], str(self.project.sodar_uuid)\n )", "def test_api_thumbnail_retrieve_by_organization_student(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=STUDENT,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_has_object_read_permission_private(\n mock_parent_permission, api_rf, profile_factory\n):\n profile = profile_factory(is_private=True)\n request = api_rf.get(\"/\")\n\n expected = mock_parent_permission.return_value\n\n assert profile.has_object_read_permission(request) == expected\n assert mock_parent_permission.call_count == 1\n assert mock_parent_permission.call_args[0] == (request,)", "def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)", "def test_filter_owner_permission(self):\n User = get_user_model()\n user1 = User.objects.create(username=\"test_user1\", email=\"user1@test.com\")\n obj = DescriptorSchema.objects.create(contributor=user1)\n obj.set_permission(Permission.VIEW, user1)\n\n data_template = {\n \"users\": {user1.id: \"view\"},\n \"groups\": {1: \"edit\", 2: \"NONE\"},\n }\n\n check_owner_permission(data_template, False, obj)\n\n # Check that only owner can set owner permission.\n data = deepcopy(data_template)\n data[\"users\"][1] = \"owner\"\n with self.assertRaises(exceptions.PermissionDenied):\n check_owner_permission(data, False, obj)\n check_owner_permission(data, True, obj)\n\n # Check that only owner can rewoke owner permission.\n obj.set_permission(Permission.OWNER, user1)\n data = deepcopy(data_template)\n data[\"users\"][1] = \"edit\"\n with self.assertRaises(exceptions.PermissionDenied):\n check_owner_permission(data, False, obj)\n check_owner_permission(data, True, obj)\n\n # Check that group can not be owner.\n obj.set_permission(Permission.VIEW, user1)\n data = deepcopy(data_template)\n data[\"groups\"][1] = \"owner\"\n with self.assertRaises(exceptions.ParseError):\n check_owner_permission(data, False, obj)\n with self.assertRaises(exceptions.ParseError):\n check_owner_permission(data, True, obj)", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def testImageHandling(self):\n \n pm = getToolByName(self.portal, 'portal_membership')\n #make sure the person's member portrait isn't defined\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n # Delete the (nonexistant) image, make sure the portrait stays undefined\n self.person.setImage('DELETE_IMAGE')\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n self.person.setImage(TEST_GIF, content_type=\"image/gif\")\n 
#self.failUnlessEqual(self.person.getImage().data, TEST_GIF)\n # Try to get a 10x10 version of the image\n imageOfSizeTag = self.person.getImageOfSize(10, 10)\n self.failUnlessEqual(imageOfSizeTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"10\" width=\"10\" />')\n self.failUnlessEqual(pm.getPersonalPortrait('abc123').__name__, 'abc123')\n \n # Try to get a scaled-by-ratio image with a width of 100.\n scaledImageTag = self.person.getScaledImageByWidth(100)\n self.failUnlessEqual(scaledImageTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"150\" width=\"100\" />')\n \n # Delete the image, make sure the portrait is deleted as well\n self.person.setImage('DELETE_IMAGE')\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n #self.person.setImage(TEST_JPEG, content_type=\"image/jpeg\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_JPEG)\n \n self.person.setImage(TEST_TIFF, content_type=\"image/tiff\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_TIFF)\n # Try to get a 10x10 version of the image\n imageOfSizeTag = self.person.getImageOfSize(10, 10)\n self.failUnlessEqual(imageOfSizeTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"10\" width=\"10\" />')\n \n # Try to get a scaled-by-ratio image with a width of 100.\n # TIFF handling in Plone is broken (probably the fault of PIL), handle the problem nicely.\n scaledImageTag = self.person.getScaledImageByWidth(100)\n self.failUnless(scaledImageTag == '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"150\" width=\"100\" />' or scaledImageTag == '')", "def test_profile_image_requested_field_anonymous_user(self):\n source_threads = [\n self.create_source_thread(\n {\"user_id\": None, \"username\": None, \"anonymous\": True, \"anonymous_to_peers\": True}\n ),\n ]\n\n self.register_get_user_response(self.user, upvoted_ids=[\"test_thread\"])\n self.register_get_threads_response(source_threads, page=1, num_pages=1)\n\n response = self.client.get(\n self.url,\n {\"course_id\": str(self.course.id), \"requested_fields\": \"profile_image\"},\n )\n assert response.status_code == 200\n response_thread = json.loads(response.content.decode('utf-8'))['results'][0]\n assert response_thread['author'] is None\n assert {} == response_thread['users']", "def test_delete_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_upsert_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_get_all_accessible_by_hash_list_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.global_template.hash], request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in 
templates))", "def glance_list_owned_public_images(glance, owner_id, image_info):\n\n images = []\n list_kwargs = {'filters': {'visibility': 'public', 'owner': owner_id}}\n public_owned_images = glance.images.list(**list_kwargs)\n for image in public_owned_images:\n # only images with the \"same\" name ('TOTO' matches 'test_TOTO' or 'TOTO - 2016-10-03')\n if image_info['image_name'] in image.name:\n images.append(image)\n return images", "def test_protect_owner(self):\n self.collection.set_permission(Permission.SHARE, self.user1)\n\n # User with share permission cannot grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertNotIn(\"owner\", self.collection.get_permissions(self.user2))\n self.assertFalse(PermissionModel.objects.filter(user=self.user2).exists())\n\n # User with share permission cannot revoke ``owner`` permission\n self.collection.set_permission(Permission.OWNER, self.user2)\n data = {\"users\": {self.user2.pk: \"editor\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n self.collection.set_permission(Permission.NONE, self.user2)\n\n # Now let user1 be owner on collection.\n set_permission(Permission.OWNER, self.user1, self.collection)\n\n # ``owner`` permission cannot be assigned to a group\n data = {\"groups\": {self.group.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertFalse(PermissionModel.objects.filter(group=self.group).exists())\n\n # User with owner permission can grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n\n # User with owner permission can revoke ``owner`` permission\n data = {\"users\": {self.user2.pk: \"edit\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(\n PermissionModel.objects.filter(\n user=self.user2, value=Permission.OWNER.value\n ).exists()\n )\n\n # User with owner permission cannot remove all owners\n data = {\"users\": {self.user1.pk: \"edit\", self.owner.pk: \"edit\"}}\n\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(resp.data[\"detail\"], \"Object must have at least one owner.\")\n\n owner_permissions = self.collection.permission_group.permissions.filter(\n value=Permission.OWNER.value\n )\n owner_count = owner_permissions.count()\n self.assertEqual(owner_count, 2)\n\n # User can delete his owner permission if there is at least one other owner\n self.assertTrue(owner_permissions.filter(user=self.user1).exists())\n data = {\"users\": {self.user1.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n 
self.assertFalse(owner_permissions.filter(user=self.user1.pk).exists())", "def test_admin_api_organization_accesses_request_anonymous(self):\n organization = factories.OrganizationFactory()\n response = self.client.get(\n f\"/api/v1.0/admin/organizations/{organization.id}/accesses/\"\n )\n\n self.assertEqual(response.status_code, 401)\n content = response.json()\n self.assertEqual(\n content[\"detail\"], \"Authentication credentials were not provided.\"\n )", "def test_get_all_accessible_by_hash_as_anonymous_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_private(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_public\": False, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"private\"})", "def testGetAccessAllowed(self):\n for user in (self.guest, self.contributor, self.delegate, self.owner, self.root):\n response = self.runGet(user, sequencer=self.hiseq2000.vendor_id)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"sodar_uuid\"], str(self.hiseq2000.sodar_uuid))", "def test_an_admin_view_anonymous(client):\n response = client.get('/admin/')\n assert status(response) == 'found'\n assert response.url.startswith('/admin/login/')", "async def test_datasets_access_call_public(self):\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = Connection(accessData=[{'accesstype': 'PUBLIC', 'datasetid': 'mock:public:id'}])\n result = await fetch_datasets_access(pool, None)\n # for now it can return a tuple of empty datasets\n # in order to get a response we will have to mock it\n # in Connection() class\n self.assertEqual(result, (['mock:public:id'], [], []))", "def test_admin_accessible(self) -> None:\n response = self.client.get(\"/admin/\")\n self.assertEqual(200, response.status_code)", "def test_auth_sharable_owned(self):\n self.do_sharable(True, 'pattieblack', None, tenant='pattieblack')", "def test_04_admin_featured_apps_as_anonymous(self):\r\n res = self.app.get('/admin/featured', follow_redirects=True)\r\n assert \"Please sign in to access this page\" in res.data, res.data", "def only_owner(func):\n def decorated(*_, **kwargs):\n id = kwargs['id']\n if not current_user.is_authenticated:\n abort(401)\n elif current_user.id != id:\n abort(403)\n return func(**kwargs)\n\n return decorated", "def test_public_status_page_remove_authorization_from_public_status_page(self):\n pass", "def test_show_container_privilege(self):\n pass", "def test_delete_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user1_template, request=mock_request\n )", "def testGetAccessAllowed(self):\n for user in (self.guest, self.contributor, self.delegate, self.owner, self.root):\n response = self.runGet(user, sequencer=self.hiseq2000.sodar_uuid)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"sodar_uuid\"], str(self.hiseq2000.sodar_uuid))", "def test_admin_view_access(request_ctx):\n user = User.get(email=\"root@test0.edu\")\n with request_ctx(\"/org_invitatin_summary\") as ctx:\n 
login_user(user, remember=True)\n rv = ctx.app.full_dispatch_request()\n assert rv.status_code == 200\n assert b\"<!DOCTYPE html>\" in rv.data, \"Expected HTML content\"\n assert b\"Organisation Invitation Summary\" in rv.data\n assert b\"root@test0.edu\" in rv.data", "def mock_nuser(self, auth):\n oput = {\n \"display_name\": \"Bob\",\n \"images\": [{\"url\": \"./static/defaultPfp.png\"}],\n \"type\": \"user\",\n }\n return oput", "def test_public(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_public\": True, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"public\"})", "def test_api_thumbnail_read_detail_anonymous(self):\n video = VideoFactory()\n thumbnail = ThumbnailFactory(video=video)\n response = self.client.get(self._get_url(video, thumbnail))\n self.assertEqual(response.status_code, 401)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"Authentication credentials were not provided.\"}\n )", "def test_get_all_accessible_by_hash_list_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.user1_template.hash], request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def testAnonymousCannotEdit(self):\n response = self.client.get(reverse(\"task_detail\", args=[1]))\n self.failUnlessEqual(response.status_code, 200)\n self.failUnless(response.content.find(\"<h2>Edit</h2>\") == -1,\n \"Anonymous user is able to edit tasks.\")", "def public_resource():\n return create_response(\n status_value=True,\n code=200,\n message=\"You have access the public resource\"\n )", "def test_get_non_owner(self):\n another_user = CustomUser(id=101, email='another_user@mail.com', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n self.client.login(email='another_user@mail.com', password='testpassword')\n\n url = reverse('route', kwargs={'way_id': self.route.way_id, 'route_id': self.route.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 403)", "def test_make_reusableitem_public_not_owner(self):\n\n # ensure is_public is false to start with\n original_reusableitem = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n original_reusableitem.is_public = False\n original_reusableitem.save()\n\n self.client.force_authenticate(user=self.user_2)\n\n response = self.client.patch(get_reusable_item_1_url(self), {'is_public': True}, format='json')\n\n # the request should fail\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_get_private(self):\n owner = create_user('owner')\n create_snippet('foo', private=True, owner=owner)\n expected = [0, 0, 1, 1]\n\n def check(i):\n response = self.get()\n self.assertEqual(len(response.data), expected[i])\n\n self.check_for_users(check, owner)", "def test_ami_exists(self) -> None:\n owner = self.sts.get_caller_identity().get('Account')\n amis = self.ec2_client.describe_images(\n Owners=[owner],\n Filters=[{\n 'Name': 'name',\n 'Values': ['saints-xctf-web-server*']\n }]\n )\n self.assertTrue(len(amis.get('Images')) > 0)", "def is_private():", "def test_api_thumbnail_administrator_read_detail_in_read_only(self):\n thumbnail = ThumbnailFactory()\n\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=thumbnail.video.playlist,\n permissions__can_update=False,\n )\n\n response = self.client.get(\n 
self._get_url(thumbnail.video, thumbnail),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)", "def test_06_user_public_profile(self):\r\n # As Anonymou user\r\n url = \"/account/%s\" % self.name\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Public User Profile page should be shown to anonymous users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Public User Profile page should be shown to authenticated users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Public User Profile page should be shown to admin users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout()", "def test_06_user_public_profile(self):\r\n # As Anonymou user\r\n url = \"/account/%s\" % self.name\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Public User Profile page should not be shown to anonymous users\"\r\n assert dom.find(id='enforce_privacy') is not None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Public User Profile page should not be shown to authenticated users\"\r\n assert dom.find(id='enforce_privacy') is not None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Public User Profile page should be shown to admin users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout()", "def ft_syndicate_access():\n \n fake_user = FakeObject()\n fake_user.email = \"fakeuser@opencloud.us\"\n\n print \"\\nensure_user_exists(%s)\\n\" % fake_user.email\n ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )\n\n print \"\\nensure_user_exists(%s)\\n\" % fake_user.email\n ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )\n\n fake_volume = FakeObject()\n fake_volume.name = \"fakevolume\"\n fake_volume.description = \"This is a fake volume, created for funtional testing\"\n fake_volume.blocksize = 1024\n fake_volume.cap_read_data = True \n fake_volume.cap_write_data = True \n fake_volume.cap_host_data = False\n fake_volume.archive = False\n fake_volume.private = True\n \n # test idempotency\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n \n print \"\\nensure_volume_access_right_exists(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )\n \n print \"\\nensure_volume_access_right_exists(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )\n \n print \"\\nensure_volume_access_right_absent(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n 
ensure_volume_access_right_absent( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_access_right_absent(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_absent( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_user_absent(%s)\\n\" % fake_user.email\n ensure_user_absent( fake_user.email )\n\n print \"\\nensure_user_absent(%s)\\n\" % fake_user.email\n ensure_user_absent( fake_user.email )\n \n \n \n \n print \"\\nensure_principal_exists(%s)\\n\" % fake_user.email\n ensure_principal_exists( fake_user.email, \"asdf\", is_admin=False, max_UGs=1100, max_RGs=1 )\n \n print \"\\nensure_principal_exists(%s)\\n\" % fake_user.email\n ensure_principal_exists( fake_user.email, \"asdf\", is_admin=False, max_UGs=1100, max_RGs=1 )\n\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n\n print \"\\nsetup_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name)\n setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, \"abcdef\" )\n \n print \"\\nsetup_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name)\n setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, \"abcdef\" )\n \n print \"\\nteardown_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name )\n teardown_volume_access( fake_user.email, fake_volume.name )\n \n print \"\\nteardown_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name )\n teardown_volume_access( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_principal_absent(%s)\\n\" % fake_user.email\n ensure_principal_absent( fake_user.email )", "def test_make_reusableitem_public_owner(self):\n\n # ensure is_public is false to start with\n original_reusableitem = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n original_reusableitem.is_public = False\n original_reusableitem.save()\n\n self.client.force_authenticate(user=self.user_1)\n\n response = self.client.patch(get_reusable_item_1_url(self), {'is_public': True}, format='json')\n\n updated_object = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n\n # the request should succeed\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # the value should be updated\n self.assertEqual(updated_object.is_public, True)", "def test_upsert_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user1_template, request=mock_request\n )", "def test_func(self):\n member_to_view = self.get_object()\n is_self = self.request.user.rfid == member_to_view.rfid\n view_others = self.request.user.has_permission(\"core.view_member\")\n return view_others or is_self" ]
[ "0.7065106", "0.6996031", "0.69618833", "0.6797919", "0.6793525", "0.67584467", "0.6624481", "0.65740824", "0.65740824", "0.6573112", "0.65594554", "0.65575325", "0.6551023", "0.6535924", "0.6526421", "0.64701694", "0.6450757", "0.6430041", "0.63988215", "0.6285752", "0.6262469", "0.6246644", "0.6237362", "0.6209549", "0.62071806", "0.6176905", "0.61224073", "0.6108376", "0.6078835", "0.60620296", "0.60412127", "0.6010369", "0.59985155", "0.5984377", "0.59736824", "0.5948925", "0.5937293", "0.59315604", "0.59200627", "0.5899835", "0.5877727", "0.5864512", "0.5844514", "0.58395994", "0.5834492", "0.58331156", "0.5820841", "0.5791333", "0.5782197", "0.5774131", "0.57734156", "0.57666546", "0.5757105", "0.5756627", "0.57481796", "0.57407266", "0.57296556", "0.57035345", "0.5675204", "0.56650496", "0.56596375", "0.56395626", "0.56323296", "0.56241786", "0.56152356", "0.56013596", "0.55987865", "0.55974025", "0.55964583", "0.5594138", "0.5586166", "0.5585891", "0.5563598", "0.5554803", "0.55535734", "0.5553236", "0.555138", "0.5549763", "0.5548059", "0.55434453", "0.5534644", "0.5533844", "0.5525972", "0.55223155", "0.5518778", "0.55156606", "0.5512557", "0.5508786", "0.55061203", "0.5506079", "0.55034566", "0.5500556", "0.54994506", "0.549395", "0.5493779", "0.54921925", "0.5489898", "0.5487272", "0.54851824", "0.5469308" ]
0.69320416
3
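The negatives attached to the record above include one complete helper, `glance_list_owned_public_images`, which lists an owner's public Glance images and keeps only those whose names contain a given substring. The sketch below restates that helper and exercises it against a hypothetical in-memory stand-in for the glanceclient `images` API; the stand-in classes and the sample image names are illustrative assumptions, not part of the original entry.

```python
from collections import namedtuple

FakeGlanceImage = namedtuple('FakeGlanceImage', 'name owner visibility')


class _FakeImagesAPI(object):
    """Hypothetical in-memory stand-in for glanceclient's images API."""

    def __init__(self, images):
        self._images = images

    def list(self, filters=None):
        # Keep only images whose attributes match every requested filter.
        filters = filters or {}
        return [img for img in self._images
                if all(getattr(img, key) == value
                       for key, value in filters.items())]


class _FakeGlanceClient(object):
    def __init__(self, images):
        self.images = _FakeImagesAPI(images)


def glance_list_owned_public_images(glance, owner_id, image_info):
    # Same logic as the negative entry above: only images whose name contains
    # image_info['image_name'] ('TOTO' matches 'test_TOTO' or 'TOTO - 2016-10-03').
    images = []
    list_kwargs = {'filters': {'visibility': 'public', 'owner': owner_id}}
    public_owned_images = glance.images.list(**list_kwargs)
    for image in public_owned_images:
        if image_info['image_name'] in image.name:
            images.append(image)
    return images


if __name__ == '__main__':
    glance = _FakeGlanceClient([
        FakeGlanceImage('test_TOTO', 'owner-1', 'public'),
        FakeGlanceImage('TOTO - 2016-10-03', 'owner-1', 'public'),
        FakeGlanceImage('unrelated', 'owner-1', 'public'),
        FakeGlanceImage('TOTO', 'owner-2', 'public'),
    ])
    matches = glance_list_owned_public_images(
        glance, 'owner-1', {'image_name': 'TOTO'})
    print([img.name for img in matches])  # ['test_TOTO', 'TOTO - 2016-10-03']
```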
Tests that an anonymous context (with is_admin set to False) can access an unowned image with is_public set to False.
def test_anon_private(self): self.do_visible(True, None, False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def test_anon_private_owned(self):\n self.do_visible(False, 'pattieblack', False)", "def test_empty_private_owned(self):\n self.do_visible(True, 'pattieblack', False, is_admin=True)", "def test_empty_public_owned(self):\n self.do_visible(True, 'pattieblack', True, is_admin=True)", "def test_no_images_unauthorized(self):\n res = self.client.get(IMAGE_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_auth_private_owned(self):\n self.do_visible(True, 'pattieblack', False, tenant='pattieblack')", "def test_aws_service_api_private_images_get(self):\n pass", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def test_get_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )", "def test_anon_public_owned(self):\n self.do_visible(True, 'pattieblack', True)", "def test_aws_service_api_private_image_get(self):\n pass", "def test_get_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_auth_public_owned(self):\n self.do_visible(True, 'pattieblack', True, tenant='pattieblack')", "def test_empty_private(self):\n self.do_visible(True, None, False, is_admin=True)", "def test_auth_private(self):\n self.do_visible(True, None, False, tenant='froggy')", "def test_get_user_template_as_anonymous_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_logged_in_not_friend(self):\n\n # log in\n self.make_logged_in_not_friend()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, 
album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PUBLIC)", "def do_visible(self, exp_res, img_owner, img_public, **kwargs):\n\n img = FakeImage(img_owner, img_public)\n ctx = context.RequestContext(**kwargs)\n\n self.assertEqual(ctx.is_image_visible(img), exp_res)", "def test_get_all_accessible_by_hash_as_anonymous_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_un_logged_in_user_can_not_upload_picture(self):\n tmp_file = generate_image_for_testing()\n response = self.client.post(self.user_passport_url,\n data={'passport': tmp_file})\n\n self.assertEqual(403, response.status_code)", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.global_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in templates))", "def test_get_all_accessible_by_hash_as_anonymous_does_not_return_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.global_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_should_render_for_owner_unpublished(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context(\n public=False)))", "def test_public_status_page_remove_authorization_from_public_status_page(self):\n pass", "def test_anonymous_public(self):\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def test_delete_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_logged_in_owner(self):\n self.make_logged_in_owner()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u, album.display_photo, ALBUM_PRIVATE)", "def test_empty_public(self):\n self.do_visible(True, None, True, is_admin=True)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n 
access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_whoami_by_anonymous_user(self):\n response = self.client.get(\"/api/users/whoami/\")\n self.assertEqual(response.status_code, 401)", "def test_delete_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_get_all_accessible_by_hash_list_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.user1_template.hash], request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_get_global_template_as_anonymous_with_access_right_returns_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n template = template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.global_template)", "def test_anonymous(self):\n self.logout_user()\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"sign in to use private threads\", status_code=403)", "def test_anon_public(self):\n self.do_visible(True, None, True)", "def test_get_all_accessible_by_id_list_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_id_list(\n self.template_id_list, request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in templates))", "def media_image_remotely_accessible(self) -> bool:\n return True", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def test_authenticated_inherits_anonymous_permission(self):\n resource = Resource('milestone', 'milestone1')\n self.assertTrue(self.check_permission('MILESTONE_VIEW',\n 'anonymous', resource))\n self.assertTrue(self.check_permission('MILESTONE_VIEW',\n 'authenticated', resource))\n self.assertIn('MILESTONE_VIEW', self.get_perm('anonymous',\n resource))\n self.assertIn('MILESTONE_VIEW', self.get_perm('authenticated',\n resource))", "def test_image_no_login(self):\n\n result = self.client.get(\"/select_image\", follow_redirects=True)\n\n self.assertIn(b\"Password\", result.data)", "def test_get_owner_image(self):\n\n # user1 is owner of image_id 1\n # user2 is owner of image ids (2,3)\n for image_id in range(1, 4):\n url = reverse(self.url_name_one, args=(image_id,))\n if image_id == 1:\n self.client.force_authenticate(self.user1)\n else:\n self.client.force_authenticate(self.user2)\n\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n if image_id == 1:\n 
self.assertEqual(response.data[\"owner\"], \"user1\")\n else:\n self.assertEqual(response.data[\"owner\"], \"user2\")\n\n # user2 try to get image_id 1 which is owner user1\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def noaccess(self):\n self.assertEqual(self.client.get(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.post(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.delete(self.ENDPOINT).status_code, 403)", "def test_upsert_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_anonymous_cannot_get_userprofileview(dclient):\n resp = dclient.get(\"/api/record/profile/\", follow=True)\n assert resp.status_code == 403", "def test_an_admin_view_anonymous(client):\n response = client.get('/admin/')\n assert status(response) == 'found'\n assert response.url.startswith('/admin/login/')", "def test_04_admin_featured_apps_as_anonymous(self):\r\n res = self.app.get('/admin/featured', follow_redirects=True)\r\n assert \"Please sign in to access this page\" in res.data, res.data", "def test_delete_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user1_template, request=mock_request\n )", "def test_cant_create_image_unauthorized(self):\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (10, 10))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n res = self.client.post(IMAGE_URL, data={'image': ntf},\n format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_auth_public(self):\n self.do_visible(True, None, True, tenant='froggy')", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_filter_public_permissions(self):\n data = {\"public\": \"view\"}\n check_public_permissions(data)\n\n data = {\"public\": \"edit\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"share\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"owner\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)", "def test_no_permission(self):\n override_acl(self.user, {'can_use_private_threads': 0})\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"t use private threads\", status_code=403)", "def test_admin_public(self):\n with self.login(self.user_admin):\n self.assertTrue(current_user.is_authenticated)\n self.assertEqual(current_user, self.user_admin)\n\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def test_get_all_accessible_by_hash_list_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.global_template.hash], request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n 
self.assertTrue((template.user is None for template in templates))", "def testGetAccessDenied(self):\n self.runGet(None)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_delete_user_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user1_template, request=mock_request\n )", "def test_not_logged_in(self):\n response = self.c.get(reverse(submit_point), {'lat': 34.0, 'lng': 45.3, 'zoom': 13})\n self.assertEqual(response.status_code, 403)", "def test_get_non_owner(self):\n another_user = CustomUser(id=101, email='another_user@mail.com', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n self.client.login(email='another_user@mail.com', password='testpassword')\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 403)", "def test_locked_asset_not_logged_in(self):\r\n self.client.logout()\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def test_user_visibility(app, resource):\n nethz = 'Something'\n with app.user(nethz=nethz):\n # Create fake signup with different nethz\n own = str(app.data.driver.db[resource].insert({'nethz': nethz}))\n other = str(app.data.driver.db[resource].insert({'nethz': 'trolo'}))\n\n # Resource: Can only see own, not both signups\n response = app.client.get('/' + resource, assert_status=200)\n assert len(response['_items']) == 1\n assert response['_items'][0]['nethz'] == nethz\n\n # Items\n own_url = '/%s/%s' % (resource, own)\n other_url = '/%s/%s' % (resource, other)\n\n # Get\n app.client.get(own_url, assert_status=200)\n app.client.get(other_url, assert_status=404)\n\n # Patch (if we can see item, we get 428 since etag is missing)\n app.client.patch(own_url, data={}, assert_status=428)\n app.client.patch(other_url, data={}, assert_status=404)\n\n # Delete (etag missing again)\n app.client.delete(own_url, assert_status=428)\n app.client.delete(other_url, assert_status=404)", "def test_get_non_owner(self):\n another_user = CustomUser(id=101, email='another_user@mail.com', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n self.client.login(email='another_user@mail.com', password='testpassword')\n\n url = reverse('route', kwargs={'way_id': self.route.way_id, 'route_id': self.route.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 403)", "def test_no_permission(self):\n override_acl(self.user, {'can_use_private_threads': 0})\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"can't use private threads\", status_code=403)", "def test_upsert_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_profile_api_anon(self):\n self.client.logout()\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 403)", "def test_get_all_accessible_by_hash_list_as_anonymous_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n 
templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.user1_template.hash], request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.vendor_id)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def is_private():", "def test_get_all_as_anonymous_with_access_right_returns_global_templates(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all(request=mock_request)\n self.assertEqual(templates.count(), 1)\n self.assertTrue((template.user is None for template in templates))", "def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_private(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_public\": False, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"private\"})", "def test_logged_in_friend_not_in_group(self):\n\n self.make_logged_in_friend_not_in_group()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_ALLFRIENDS)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_ALLFRIENDS)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_ALLFRIENDS)", "def test_api_thumbnail_retrieve_by_organization_administrator(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=ADMINISTRATOR,\n )\n\n self.assert_user_can_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_requires_privilege_no_such(self):\n @requires_privilege('bomboozle', domain='zizzle')\n def view(request, *args, **kwargs):\n pass\n\n requestor_role = arbitrary.role()\n request = HttpRequest()\n request.role = requestor_role\n with self.assertRaises(PermissionDenied):\n view(request)", "def test_closed_api_not_authenticated(self):\n anonymous = Group.objects.get(name='Anonymous')\n anonymous.permissions.remove(\n Permission.objects.get(codename='api_read_initiative')\n )\n\n response = self.client.get(self.initiatives_url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def testAnonymousCannotEdit(self):\n response = self.client.get(reverse(\"task_detail\", args=[1]))\n self.failUnlessEqual(response.status_code, 200)\n self.failUnless(response.content.find(\"<h2>Edit</h2>\") == -1,\n \"Anonymous user is able to edit tasks.\")", "def test_get_all_accessible_by_hash_list_as_anonymous_does_not_return_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.global_template.hash], request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"DELETE\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_only_edit_perm(self):\n self.assertStatusCode(self.url, 403)", "def 
test_without_whitelisted_ip(self, public_omis_api_client):\n order = OrderFactory()\n\n url = reverse(\n 'api-v3:public-omis:payment:collection',\n kwargs={'public_token': order.public_token},\n )\n public_omis_api_client.set_http_x_forwarded_for('1.1.1.1')\n response = public_omis_api_client.get(url)\n\n assert response.status_code == status.HTTP_401_UNAUTHORIZED", "def test_upsert_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user1_template, request=mock_request\n )", "def test_non_owner_authenticated_user_read_given_blogpost_hidden_app(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n app = AppFactory.create(hidden=1)\r\n user = UserFactory.create()\r\n blogpost = BlogpostFactory.create(app=app)\r\n\r\n assert self.mock_authenticated.id != app.owner.id\r\n assert_raises(Forbidden, getattr(require, 'blogpost').read, blogpost)", "def test_make_reusableitem_public_not_owner(self):\n\n # ensure is_public is false to start with\n original_reusableitem = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n original_reusableitem.is_public = False\n original_reusableitem.save()\n\n self.client.force_authenticate(user=self.user_2)\n\n response = self.client.patch(get_reusable_item_1_url(self), {'is_public': True}, format='json')\n\n # the request should fail\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_read_unauthorized(self):\n\n url = reverse('file', kwargs={'file_id': str(self.file.id)})\n\n data = {}\n\n self.client.force_authenticate(user=self.test_user2_obj)\n response = self.client.get(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_requires_privilege_no_current_role(self):\n @requires_privilege(self.zazzle_privilege.slug, domain='zizzle')\n def view(request, *args, **kwargs):\n pass\n\n request = HttpRequest()\n with self.assertRaises(PermissionDenied):\n view(request)", "def guest_access(func):\n def decorated(*_, **kwargs):\n public_profiles = current_app.config['USER_PUBLIC_PROFILES']\n if not public_profiles:\n if not current_user.is_authenticated:\n abort(401)\n elif current_user.id != kwargs['id']:\n abort(403)\n return func(**kwargs)\n\n return decorated", "def test_admin_api_organization_accesses_request_anonymous(self):\n organization = factories.OrganizationFactory()\n response = self.client.get(\n f\"/api/v1.0/admin/organizations/{organization.id}/accesses/\"\n )\n\n self.assertEqual(response.status_code, 401)\n content = response.json()\n self.assertEqual(\n content[\"detail\"], \"Authentication credentials were not provided.\"\n )", "def test_unauthenticated_get(self):\n url = reverse('edit-list')\n\n response = self.client.get(url)\n self.assertEqual(403, response.status_code)\n self.assertEqual('Forbidden', response.status_text)\n self.assertTrue(\n 'credentials were not provided.' 
in response.data.get('detail'))", "def test_models_organization_get_abilities_anonymous(self):\n organization = factories.OrganizationFactory()\n abilities = organization.get_abilities(AnonymousUser())\n\n self.assertEqual(\n abilities,\n {\n \"delete\": False,\n \"get\": True,\n \"patch\": False,\n \"put\": False,\n \"manage_accesses\": False,\n },\n )", "def no_network_access_check(user):\n return not user.has_property(\"network_access\")", "def test_public_user(self):\n set_permission(Permission.SHARE, self.user1, self.collection)\n\n data = {\"public\": \"view\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"none\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"edit\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"share\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"owner\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def test_detail_not_contributor_forbidden(self):\n self.login(self.user1)\n resp = self.client.get(self.get_url(self.c2.pk))\n self.assert403(resp)", "def test_access_negative(self, api):\n self.builder.add_user(api.get_user())\n r1 = api.access_user(api.get_user(), False)\n access_false = self.builder.get_access(api.get_user())\n self.builder.del_user(api.get_user())\n assert access_false == 0\n assert r1.status_code == 200", "def test_anonymous_user(self):\r\n self.request.user = AnonymousUser()\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])" ]
[ "0.74188906", "0.7377053", "0.7144698", "0.7144698", "0.6877964", "0.67840487", "0.676745", "0.67583287", "0.6704905", "0.6680704", "0.66680026", "0.66457", "0.663838", "0.6636891", "0.6628976", "0.6577022", "0.65723354", "0.6503181", "0.6480543", "0.6474261", "0.64629656", "0.6443702", "0.64249873", "0.63818884", "0.63681024", "0.63545835", "0.632805", "0.63218874", "0.6316559", "0.63142353", "0.6273037", "0.6270338", "0.62618405", "0.62191504", "0.62191504", "0.62017024", "0.61517936", "0.6149368", "0.61281896", "0.61045915", "0.6099422", "0.6090164", "0.6085582", "0.6081666", "0.6077936", "0.60692257", "0.6068726", "0.60632163", "0.60588783", "0.604874", "0.6044397", "0.60420513", "0.60318685", "0.60243255", "0.6017243", "0.6000177", "0.6000177", "0.59833515", "0.5980675", "0.5976529", "0.59732753", "0.597198", "0.5950697", "0.59454006", "0.5942782", "0.59362525", "0.5931649", "0.5928754", "0.591818", "0.59139663", "0.59030837", "0.5901103", "0.589323", "0.5889313", "0.5885979", "0.58839035", "0.58632296", "0.58617157", "0.58602387", "0.58524334", "0.58501065", "0.5839948", "0.5830691", "0.5822796", "0.58221245", "0.5818656", "0.5807199", "0.5800876", "0.57934135", "0.5788844", "0.57839465", "0.5781505", "0.5778051", "0.5768927", "0.5758761", "0.5756927", "0.5753452", "0.57477", "0.57382184", "0.5738115" ]
0.6370962
24
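The record above pairs its docstring with `test_anon_private`, and several of the negatives quoted here show the `do_visible` harness it relies on (a `FakeImage` plus `context.RequestContext`, asserting on `is_image_visible`). Below is a minimal, self-contained sketch of that pattern. The real `RequestContext.is_image_visible` is not reproduced in this section, so `FakeRequestContext` and its visibility rule are assumptions inferred only from the expected results of the tests listed in these records, not the actual Glance implementation.

```python
import unittest


class FakeImage(object):
    """Minimal stand-in mirroring the FakeImage used in the negatives above."""

    def __init__(self, owner, is_public):
        self.owner = owner
        self.is_public = is_public


class FakeRequestContext(object):
    """Assumed stand-in for context.RequestContext; the rule below is only
    inferred from the expected results of the tests in these records."""

    def __init__(self, tenant=None, is_admin=False):
        self.tenant = tenant
        self.is_admin = is_admin

    def is_image_visible(self, image):
        # Visible to admins, for public images, for unowned images, or when
        # the requesting tenant owns the image.
        return (self.is_admin
                or image.is_public
                or image.owner is None
                or image.owner == self.tenant)


class VisibilityTests(unittest.TestCase):
    def do_visible(self, exp_res, img_owner, img_public, **kwargs):
        img = FakeImage(img_owner, img_public)
        ctx = FakeRequestContext(**kwargs)
        self.assertEqual(ctx.is_image_visible(img), exp_res)

    def test_anon_private(self):
        # Anonymous context, unowned private image: visible.
        self.do_visible(True, None, False)

    def test_anon_private_owned(self):
        # Anonymous context, owned private image: not visible.
        self.do_visible(False, 'pattieblack', False)


if __name__ == '__main__':
    unittest.main()
```

The same harness also covers the next record's `test_anon_private_owned`, where an owned private image must not be visible to an anonymous context.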
Tests that an anonymous context (with is_admin set to False) cannot access an owned image with is_public set to False.
def test_anon_private_owned(self): self.do_visible(False, 'pattieblack', False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_get_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_get_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )", "def test_get_user_template_as_anonymous_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def test_delete_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_auth_private_owned(self):\n self.do_visible(True, 'pattieblack', False, tenant='pattieblack')", "def test_empty_private_owned(self):\n self.do_visible(True, 'pattieblack', False, is_admin=True)", "def test_no_images_unauthorized(self):\n res = self.client.get(IMAGE_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_empty_public_owned(self):\n self.do_visible(True, 'pattieblack', True, is_admin=True)", "def test_anon_public_owned(self):\n self.do_visible(True, 'pattieblack', True)", "def test_delete_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user1_template, request=mock_request\n )", "def test_un_logged_in_user_can_not_upload_picture(self):\n tmp_file = generate_image_for_testing()\n response = self.client.post(self.user_passport_url,\n data={'passport': tmp_file})\n\n self.assertEqual(403, response.status_code)", "def test_upsert_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with 
self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_aws_service_api_private_images_get(self):\n pass", "def test_delete_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_should_render_for_owner_unpublished(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context(\n public=False)))", "def do_visible(self, exp_res, img_owner, img_public, **kwargs):\n\n img = FakeImage(img_owner, img_public)\n ctx = context.RequestContext(**kwargs)\n\n self.assertEqual(ctx.is_image_visible(img), exp_res)", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def test_logged_in_not_friend(self):\n\n # log in\n self.make_logged_in_not_friend()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PUBLIC)", "def test_auth_public_owned(self):\n self.do_visible(True, 'pattieblack', True, tenant='pattieblack')", "def test_aws_service_api_private_image_get(self):\n pass", "def test_delete_user_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user1_template, request=mock_request\n )", "def test_logged_in_owner(self):\n self.make_logged_in_owner()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u, album.display_photo, ALBUM_PRIVATE)", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_anonymous_cannot_get_userprofileview(dclient):\n resp = dclient.get(\"/api/record/profile/\", follow=True)\n assert resp.status_code == 403", "def testGetAccessDenied(self):\n self.runGet(None)\n 
self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_upsert_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user1_template, request=mock_request\n )", "def test_whoami_by_anonymous_user(self):\n response = self.client.get(\"/api/users/whoami/\")\n self.assertEqual(response.status_code, 401)", "def test_auth_private(self):\n self.do_visible(True, None, False, tenant='froggy')", "def test_upsert_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_access_control_is_superuser_as_anonymous_raises_access_control_error(\n self,\n ):\n # Arrange\n mock_request = create_mock_request(user=self.anonymous_user)\n\n # Act # Assert\n with self.assertRaises(AccessControlError):\n access_control_api.is_superuser(\n mock_function, request=mock_request\n )", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.vendor_id)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def testAnonymousCannotEdit(self):\n response = self.client.get(reverse(\"task_detail\", args=[1]))\n self.failUnlessEqual(response.status_code, 200)\n self.failUnless(response.content.find(\"<h2>Edit</h2>\") == -1,\n \"Anonymous user is able to edit tasks.\")", "def test_get_owner_image(self):\n\n # user1 is owner of image_id 1\n # user2 is owner of image ids (2,3)\n for image_id in range(1, 4):\n url = reverse(self.url_name_one, args=(image_id,))\n if image_id == 1:\n self.client.force_authenticate(self.user1)\n else:\n self.client.force_authenticate(self.user2)\n\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n if image_id == 1:\n self.assertEqual(response.data[\"owner\"], \"user1\")\n else:\n self.assertEqual(response.data[\"owner\"], \"user2\")\n\n # user2 try to get image_id 1 which is owner user1\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_requires_privilege_no_such(self):\n @requires_privilege('bomboozle', domain='zizzle')\n def view(request, *args, **kwargs):\n pass\n\n requestor_role = arbitrary.role()\n request = HttpRequest()\n request.role = requestor_role\n with self.assertRaises(PermissionDenied):\n view(request)", "def test_filter_public_permissions(self):\n data = {\"public\": \"view\"}\n check_public_permissions(data)\n\n data = {\"public\": \"edit\"}\n with self.assertRaises(exceptions.PermissionDenied):\n 
check_public_permissions(data)\n\n data = {\"public\": \"share\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"owner\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)", "def test_anon_private(self):\n self.do_visible(True, None, False)", "def test_authenticated_inherits_anonymous_permission(self):\n resource = Resource('milestone', 'milestone1')\n self.assertTrue(self.check_permission('MILESTONE_VIEW',\n 'anonymous', resource))\n self.assertTrue(self.check_permission('MILESTONE_VIEW',\n 'authenticated', resource))\n self.assertIn('MILESTONE_VIEW', self.get_perm('anonymous',\n resource))\n self.assertIn('MILESTONE_VIEW', self.get_perm('authenticated',\n resource))", "def test_empty_private(self):\n self.do_visible(True, None, False, is_admin=True)", "def test_upsert_user_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user1_template, request=mock_request\n )", "def test_get_non_owner(self):\n another_user = CustomUser(id=101, email='another_user@mail.com', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n self.client.login(email='another_user@mail.com', password='testpassword')\n\n url = reverse('route', kwargs={'way_id': self.route.way_id, 'route_id': self.route.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 403)", "def test_admin_api_organization_accesses_request_anonymous(self):\n organization = factories.OrganizationFactory()\n response = self.client.get(\n f\"/api/v1.0/admin/organizations/{organization.id}/accesses/\"\n )\n\n self.assertEqual(response.status_code, 401)\n content = response.json()\n self.assertEqual(\n content[\"detail\"], \"Authentication credentials were not provided.\"\n )", "def test_get_all_accessible_by_hash_as_anonymous_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_detail_not_contributor_forbidden(self):\n self.login(self.user1)\n resp = self.client.get(self.get_url(self.c2.pk))\n self.assert403(resp)", "def test_cant_create_image_unauthorized(self):\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (10, 10))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n res = self.client.post(IMAGE_URL, data={'image': ntf},\n format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_get_non_owner(self):\n another_user = CustomUser(id=101, email='another_user@mail.com', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n self.client.login(email='another_user@mail.com', password='testpassword')\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 403)", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = 
create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.global_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in templates))", "def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)", "def test_get_all_accessible_by_id_list_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_id_list(\n self.template_id_list, request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in templates))", "def test_not_logged_in(self):\n response = self.c.get(reverse(submit_point), {'lat': 34.0, 'lng': 45.3, 'zoom': 13})\n self.assertEqual(response.status_code, 403)", "def media_image_remotely_accessible(self) -> bool:\n return True", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_anonymous_public(self):\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def noaccess(self):\n self.assertEqual(self.client.get(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.post(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.delete(self.ENDPOINT).status_code, 403)", "def every_non_existing_owner_should_not_have_profile_image_link(context):\n items = context.response.json()['items']\n for item in items:\n owner = item['owner']\n if not owner['user_type'] == 'does_not_exist':\n continue\n assert 'profile_image' not in owner\n logging.debug(\n 'Not existing Owner %s does not have a valid profile image link',\n owner['display_name'])", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.sodar_uuid)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user, sequencer=self.hiseq2000.sodar_uuid)\n self.response_403()", "def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_04_admin_featured_apps_as_anonymous(self):\r\n res = self.app.get('/admin/featured', follow_redirects=True)\r\n assert \"Please sign in to access this page\" in res.data, res.data", "def test_locked_asset_not_logged_in(self):\r\n self.client.logout()\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def test_user_not_in_group_cannot_access(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url)", "def test_requires_privilege_no_current_role(self):\n @requires_privilege(self.zazzle_privilege.slug, domain='zizzle')\n def view(request, *args, **kwargs):\n pass\n\n request = HttpRequest()\n with self.assertRaises(PermissionDenied):\n view(request)", "def test_non_owner_authenticated_user_read_given_blogpost_hidden_app(self):\r\n\r\n with 
self.flask_app.test_request_context('/'):\r\n app = AppFactory.create(hidden=1)\r\n user = UserFactory.create()\r\n blogpost = BlogpostFactory.create(app=app)\r\n\r\n assert self.mock_authenticated.id != app.owner.id\r\n assert_raises(Forbidden, getattr(require, 'blogpost').read, blogpost)", "def test_logged_user_not_in_group_cannot_access(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n utils.test_cannot_access(self, self.url, expected_url)", "def test_get_global_template_as_anonymous_with_access_right_returns_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n template = template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.global_template)", "def test_get_all_accessible_by_hash_as_anonymous_does_not_return_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.global_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def only_owner(func):\n def decorated(*_, **kwargs):\n id = kwargs['id']\n if not current_user.is_authenticated:\n abort(401)\n elif current_user.id != id:\n abort(403)\n return func(**kwargs)\n\n return decorated", "def guest_access(func):\n def decorated(*_, **kwargs):\n public_profiles = current_app.config['USER_PUBLIC_PROFILES']\n if not public_profiles:\n if not current_user.is_authenticated:\n abort(401)\n elif current_user.id != kwargs['id']:\n abort(403)\n return func(**kwargs)\n\n return decorated", "def test_empty_public(self):\n self.do_visible(True, None, True, is_admin=True)", "def test_set_display_name_user_template_as_anonymous_with_access_right_access_raises_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.user1_template, \"new_name\", request=mock_request\n )", "def test_requestNonexistentAvatarId(self):\n username = '%s@%s' % (self.localpart, self.domain)\n d = self._requestAvatarId(\n UsernamePassword(username, self.password))\n return self.assertFailure(d, errors.NoSuchUser)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"DELETE\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_get_all_accessible_by_hash_list_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.user1_template.hash], request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_not_logged_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_anonymous(self):\n self.logout_user()\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"sign in to use private threads\", status_code=403)", "def test_api_thumbnail_retrieve_by_organization_administrator(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=ADMINISTRATOR,\n )\n\n self.assert_user_can_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_only_edit_perm(self):\n self.assertStatusCode(self.url, 403)", "def 
test_anon_public(self):\n self.do_visible(True, None, True)", "def test_requires_privilege_denied(self):\n\n @requires_privilege(self.zazzle_privilege.slug, domain='zizzle')\n def view(request, *args, **kwargs):\n pass\n\n requestor_role = arbitrary.role()\n\n request = HttpRequest()\n request.role = requestor_role.instantiate({})\n with self.assertRaises(PermissionDenied):\n view(request)", "def assert_cannot_view(obj):\n selenium_utils.open_url(obj.url)\n assert ui_utils.is_error_403()", "def test_logged_in_friend_not_in_group(self):\n\n self.make_logged_in_friend_not_in_group()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_ALLFRIENDS)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_ALLFRIENDS)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_ALLFRIENDS)", "def test_set_display_name_global_template_as_anonymous_with_access_right_access_raises_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.global_template, \"new_name\", request=mock_request\n )", "def test_models_organization_get_abilities_anonymous(self):\n organization = factories.OrganizationFactory()\n abilities = organization.get_abilities(AnonymousUser())\n\n self.assertEqual(\n abilities,\n {\n \"delete\": False,\n \"get\": True,\n \"patch\": False,\n \"put\": False,\n \"manage_accesses\": False,\n },\n )", "def test_post_anonymous_private(self):\n response = self.post(content='foo', private=True)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_get_other_users_template_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )", "def assert_response_resource_not_accessible(self, response):\n self.assertEqual(response.status_code, 403)\n self.assertEqual(\n response.json(),\n {\"detail\": \"You do not have permission to perform this action.\"},\n )", "def test_user_visibility(app, resource):\n nethz = 'Something'\n with app.user(nethz=nethz):\n # Create fake signup with different nethz\n own = str(app.data.driver.db[resource].insert({'nethz': nethz}))\n other = str(app.data.driver.db[resource].insert({'nethz': 'trolo'}))\n\n # Resource: Can only see own, not both signups\n response = app.client.get('/' + resource, assert_status=200)\n assert len(response['_items']) == 1\n assert response['_items'][0]['nethz'] == nethz\n\n # Items\n own_url = '/%s/%s' % (resource, own)\n other_url = '/%s/%s' % (resource, other)\n\n # Get\n app.client.get(own_url, assert_status=200)\n app.client.get(other_url, assert_status=404)\n\n # Patch (if we can see item, we get 428 since etag is missing)\n app.client.patch(own_url, data={}, assert_status=428)\n app.client.patch(other_url, data={}, assert_status=404)\n\n # Delete (etag missing again)\n app.client.delete(own_url, assert_status=428)\n app.client.delete(other_url, assert_status=404)", "def test_set_display_name_user_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = 
create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.user1_template, \"new_name\", request=mock_request\n )", "def test_locked_asset_not_registered(self):\r\n self.client.login(username=self.usr, password=self.pwd)\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r" ]
[ "0.7152943", "0.7109335", "0.70072263", "0.69585896", "0.69585896", "0.68980104", "0.6817293", "0.6704428", "0.66895926", "0.64946806", "0.64736044", "0.64594865", "0.645942", "0.644374", "0.64420414", "0.6405941", "0.6374608", "0.6368752", "0.6364217", "0.6354941", "0.6344576", "0.63394815", "0.633286", "0.63244283", "0.63098013", "0.630356", "0.6294088", "0.6292082", "0.6288085", "0.6270858", "0.6269034", "0.62631005", "0.62631005", "0.62458915", "0.6220327", "0.62175286", "0.62014264", "0.6188205", "0.61774594", "0.6155064", "0.6150028", "0.6147294", "0.6145902", "0.6133994", "0.61234176", "0.61016715", "0.60899156", "0.6087885", "0.606311", "0.6060585", "0.60560876", "0.60560876", "0.60436267", "0.604317", "0.6031659", "0.60237175", "0.60203964", "0.6006878", "0.599706", "0.5994134", "0.5968433", "0.5968433", "0.5968433", "0.5968433", "0.5967996", "0.5962489", "0.59613883", "0.59566545", "0.5951349", "0.5950316", "0.59460235", "0.5943068", "0.59385496", "0.5932853", "0.5932765", "0.5931635", "0.592107", "0.5900464", "0.58992934", "0.5895116", "0.5890216", "0.58882415", "0.5882411", "0.5877384", "0.5876106", "0.58755463", "0.5874961", "0.5867038", "0.58620375", "0.5853199", "0.58499885", "0.58346814", "0.5832283", "0.5805112", "0.57964677", "0.57960236", "0.57958513", "0.5795489", "0.57945323", "0.57878417" ]
0.6603411
9
Tests that an empty context (with is_admin set to True) can not share an image, with or without membership.
def test_anon_shared(self):
    self.do_sharable(False, 'pattieblack', None)
    self.do_sharable(False, 'pattieblack', FakeMembership(True))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_un_logged_in_user_can_not_upload_picture(self):\n tmp_file = generate_image_for_testing()\n response = self.client.post(self.user_passport_url,\n data={'passport': tmp_file})\n\n self.assertEqual(403, response.status_code)", "def test_auth_sharable_cannot_share(self):\n self.do_sharable(False, 'pattieblack', FakeMembership(False),\n tenant='froggy')", "def test_user_without_share(self):\n set_permission(Permission.EDIT, self.user1, self.collection)\n\n # Can not add permissions to users.\n data = {\"users\": {self.user2.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n # Can not add permissions to groups.\n data = {\"users\": {self.group.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def every_non_existing_owner_should_not_have_profile_image_link(context):\n items = context.response.json()['items']\n for item in items:\n owner = item['owner']\n if not owner['user_type'] == 'does_not_exist':\n continue\n assert 'profile_image' not in owner\n logging.debug(\n 'Not existing Owner %s does not have a valid profile image link',\n owner['display_name'])", "def test_no_images_unauthorized(self):\n res = self.client.get(IMAGE_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_user_not_in_group_cannot_access(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url)", "def test_empty_shared(self):\n self.do_sharable(False, 'pattieblack', None, is_admin=True)\n self.do_sharable(False, 'pattieblack', FakeMembership(True),\n is_admin=True)", "def testMissingImage(self):\n self.assertNotIn('no_image', self.data)", "def test_logged_user_not_in_group_cannot_access(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n utils.test_cannot_access(self, self.url, expected_url)", "def test_user_not_in_group_cannot_update(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def cant_share_photo(request, ttl=None,*args, **kwargs):\n\tif ttl:\n\t\ttry:\n\t\t\tttl = int(ttl)\n\t\texcept ValueError:\n\t\t\tttl = None\n\tphoto_id = request.session.get(\"personal_group_shared_photo_id\",None)\n\torigin = request.session.get(\"personal_group_shared_photo_origin\",None)\n\tphoto_url = request.session.get(\"personal_group_shared_photo_url\",None)\n\tphoto_caption = request.session.get(\"personal_group_shared_photo_caption\",None)\n\tphoto_owner_username = request.session.get(\"personal_group_shared_photo_owner_username\",None)\n\treturn 
render(request,\"personal_group/sharing/photo_not_shared.html\",{'photo_caption':photo_caption,'photo_id':photo_id,'photo_url':photo_url,\\\n\t\t'photo_owner_username':photo_owner_username,'origin':origin,'ttl':ttl})", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def test_cant_create_image_unauthorized(self):\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (10, 10))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n res = self.client.post(IMAGE_URL, data={'image': ntf},\n format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_no_permission(self):\n override_acl(self.user, {'can_use_private_threads': 0})\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"can't use private threads\", status_code=403)", "def test_no_permission(self):\n override_acl(self.user, {'can_use_private_threads': 0})\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"t use private threads\", status_code=403)", "def test_show_host_not_exist(self):\n self.req.environ['cinder.context'].is_admin = True\n dest = 'dummydest'\n self.assertRaises(webob.exc.HTTPNotFound,\n self.controller.show,\n self.req, dest)", "def test_logged_in_not_friend(self):\n\n # log in\n self.make_logged_in_not_friend()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PUBLIC)", "def ensure_share(self, context, share, share_server=None):\n pass", "def test_logged_in_friend_not_in_group(self):\n\n self.make_logged_in_friend_not_in_group()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_ALLFRIENDS)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_ALLFRIENDS)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_ALLFRIENDS)", "def test_detail_not_contributor_forbidden(self):\n self.login(self.user1)\n resp = self.client.get(self.get_url(self.c2.pk))\n self.assert403(resp)", "def test_locked_asset_not_registered(self):\r\n self.client.login(username=self.usr, password=self.pwd)\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def test_allow_build_not_in_allowlist(self):\n handler 
= MyHandler()\n container = {\"name\": \"test\", \"branch\": \"branch\"}\n\n allow = handler.allow_build(ArtifactType.IMAGE,\n name=container[\"name\"],\n branch=container[\"branch\"])\n assert not allow", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def ensure_share(self, context, share, share_server=None):\r\n LOG.debug(\"Ensure share.\")", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def not_authorized(context):\n context.config_file = 'NON_EXISTENT_FILE.cfg'", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_should_render_for_owner_unpublished(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context(\n public=False)))", "def test_user_not_in_group_cannot_delete(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url)\n self.assertEqual(len(Group.objects.all()), 1)", "def noaccess(self):\n self.assertEqual(self.client.get(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.post(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.delete(self.ENDPOINT).status_code, 403)", "def test_locked_asset_not_logged_in(self):\r\n self.client.logout()\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_mod_not_reported(self):\n override_acl(self.user, {'can_moderate_private_threads': 1})\n\n response = self.client.get(self.api_link)\n self.assertEqual(response.status_code, 404)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"DELETE\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_user_not_in_group_cannot_update_tab(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url, self.data)", "def test_user_does_not_have_access(self):\n self.assertRaises(\n ObjectDoesNotExist,\n Thread.public.get_by_user,\n **{'thread_id': self.thread.pk, 'user': self.user}\n )", "def test_group_is_not_private_user_is_not_member(self):\n thread = self.create_thread()\n user = self.create_user()\n 
self.assertTrue(thread.first_message.visible_to_user(user))", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_image_no_login(self):\n\n result = self.client.get(\"/select_image\", follow_redirects=True)\n\n self.assertIn(b\"Password\", result.data)", "def test_created_invalid_image(self):\n res = self.client.post(IMAGE_URL,\n data={'image': 'no_image'}, format='multipart')\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_not_logged_in(self):\n response = self.c.get(reverse(submit_point), {'lat': 34.0, 'lng': 45.3, 'zoom': 13})\n self.assertEqual(response.status_code, 403)", "def test_chooser_no_perms(self):\n root_page = Page.objects.get(pk=2)\n root_page.add_child(instance=NewsIndex(\n title='News', slug='news'))\n root_page.add_child(instance=SecondaryNewsIndex(\n title='Secondary News', slug='secondary-news'))\n\n response = self.client.get(reverse('wagtailnews:choose'))\n self.assertEqual(response.status_code, 403)", "def test_attachment_deletion_allowed_no_attachment(self):\n self.assertRaises(exception.ConflictNovaUsingAttachment,\n self.volume_api.attachment_deletion_allowed,\n self.context, None)", "def test_not_permitted(self, default_store):\n course = self.create_course_with_orphans(default_store)\n orphan_url = reverse_course_url('orphan_handler', course.id)\n\n test_user_client, test_user = self.create_non_staff_authed_user_client()\n CourseEnrollment.enroll(test_user, course.id)\n response = test_user_client.get(orphan_url)\n self.assertEqual(response.status_code, 403)\n response = test_user_client.delete(orphan_url)\n self.assertEqual(response.status_code, 403)", "def test_no_permission(client):\n user = user_with_permissions()\n\n url = reverse(\"admin:index\")\n client.force_login(user)\n\n response = client.get(url)\n assert parse_sidemenu(response) == {\"Global\": [\"/en/admin/\"]}", "def deny_access(self, context, share, access, share_server=None):\n self._get_helper(share).deny_access('/', share, access)", "def testGetAccessDenied(self):\n self.runGet(None)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def every_non_existing_owner_should_not_have_profile_link(context):\n items = context.response.json()['items']\n for item in items:\n owner = item['owner']\n if not owner['user_type'] == 'does_not_exist':\n continue\n assert 'link' not in owner\n logging.debug(\n 'Not existing Owner %s does not have a valid profile link',\n owner['display_name'])", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def _is_user_context(context):\n if not context:\n return False\n if context.is_admin:\n return False\n if not context.user_id or not context.project_id:\n return False\n return True", "def _is_user_context(context):\n if not context:\n return False\n if context.is_admin:\n return False\n if not context.user_id or not context.project_id:\n return False\n return True", "def test_not_logged_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_cannot_create_group(self):\n\n 
utils.test_not_logged_cannot_access(self, self.url, self.data)", "def is_user_context(context):\n if not context:\n return False\n if context.is_admin:\n return False\n if not context.user_id or not context.project_id:\n return False\n return True", "def is_user_context(context):\n if not context:\n return False\n if context.is_admin:\n return False\n if not context.user_id or not context.project_id:\n return False\n return True", "def is_user_context(context):\n if not context:\n return False\n if context.is_admin:\n return False\n if not context.user_id or not context.project_id:\n return False\n return True", "def test_requestNonexistentAvatarId(self):\n username = '%s@%s' % (self.localpart, self.domain)\n d = self._requestAvatarId(\n UsernamePassword(username, self.password))\n return self.assertFailure(d, errors.NoSuchUser)", "def test_only_add_perm(self):\n self.assertStatusCode(self.url, 403)", "def test_video_image_upload_disabled(self):\n video_image_upload_url = self.get_url_for_course_key(self.course.id, {'edx_video_id': 'test_vid_id'})\n response = self.client.post(video_image_upload_url, {'file': 'dummy_file'}, format='multipart')\n self.assertEqual(response.status_code, 404)", "def do_sharable(self, exp_res, img_owner, membership=None, **kwargs):\n\n img = FakeImage(img_owner, True)\n ctx = context.RequestContext(**kwargs)\n\n sharable_args = {}\n if membership is not None:\n sharable_args['membership'] = membership\n\n self.assertEqual(ctx.is_image_sharable(img, **sharable_args), exp_res)", "def test_theme_template_disallow_loading_admin_templates():\n app = create_ctfd()\n with app.app_context():\n try:\n # Make an empty malicious theme\n filename = os.path.join(\n app.root_path, \"themes\", \"foo_disallow\", \"admin\", \"malicious.html\"\n )\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n set_config(\"ctf_theme\", \"foo_disallow\")\n with open(filename, \"w\") as f:\n f.write(\"malicious\")\n\n with pytest.raises(TemplateNotFound):\n render_template_string(\"{% include 'admin/malicious.html' %}\")\n finally:\n # Remove empty theme\n shutil.rmtree(\n os.path.join(app.root_path, \"themes\", \"foo_disallow\"),\n ignore_errors=True,\n )", "def test_news_index_no_perm(self):\n self.assertStatusCode(self.url, 403)", "def test_upsert_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_not_creator_cannot_delete(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url)\n self.assertEqual(len(Group.objects.all()), 1)", "def test_attachment_deletion_allowed_no_instance(self, mock_get_server):\n attachment = self._get_attachment(with_instance_id=False)\n self.volume_api.attachment_deletion_allowed(self.context, attachment)\n mock_get_server.assert_not_called()", "def test_user_not_in_group_cannot_create_tab(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url)\n self.assertEqual(len(Tab.objects.all()), 0)", "def assert_response_resource_not_accessible(self, response):\n self.assertEqual(response.status_code, 403)\n self.assertEqual(\n response.json(),\n {\"detail\": \"You do not have permission to 
perform this action.\"},\n )", "def test_03_self_cannot_upgrade_resource(self):\n holes = self.holes\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.VIEW)\n self.assertFalse(dog in holes.raccess.edit_users)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_resource_with_user(\n holes, dog, PrivilegeCodes.VIEW)\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_resource_with_user(\n holes, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))", "def test_no_perm_thread_delete(self):\n self.context['request'].user = User.objects.get(pk=118533)\n allowed = has_perm(self.context, 'forums_forum.thread_delete_forum',\n self.forum_1)\n eq_(allowed, False)\n allowed = has_perm(self.context, 'forums_forum.thread_delete_forum',\n self.forum_2)\n eq_(allowed, False)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def testPostAccessDenied(self):\n self.runPost(None, data=self.post_data)\n self.response_401()\n for user in (self.guest, self.norole, self.unrelated_owner):\n self.runPost(user, data=self.post_data)\n self.response_403()", "def testNoPermission(self):\n self.login_user(self.user)\n response = self.client.get(self.url, self.args)\n self.assertEqual(response.status_code, 302)", "def test_no_permissions(self):\n\n login(self.client)\n\n client = create_client('test')\n client.write_access = False\n client.save()\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': 3})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def test_delete_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_user_profile_picture_invalid_image_fails(self):\n image_upload_url = PROCEDURE_URL\n\n payload = {\n 'name': 'temp',\n 'speciality': [self.speciality.pk],\n 'image': 'invalid image',\n 'overview': 'bla bla bla'\n }\n\n res = self.client.post(\n image_upload_url,\n payload,\n format=\"multipart\"\n )\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def media_image_remotely_accessible(self) -> bool:\n return True", "def test_get_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )", "def test_util_has_perm_or_owns_sanity(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n perm = 'forums_forum.thread_edit_forum'\n allowed = access.has_perm_or_owns(me, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = access.has_perm_or_owns(me, perm, other_t, self.forum_1)\n eq_(allowed, False)", "def 
test_thread_is_not_moderated(self):\n self.assertRaises(\n ObjectDoesNotExist,\n Thread.public.get_by_user,\n **{'thread_id': self.thread.pk, 'user': self.user}\n )\n self.group.private = False\n self.group.save()\n self.assertEqual(\n Thread.public.get_by_user(\n thread_id=self.thread.pk, user=self.user),\n self.thread\n )", "def test_unshare_template_registration(self):\n pass", "def test_get_other_users_template_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )", "def test_creator_group_not_enabled(self):\r\n self.assertTrue(has_access(self.user, CourseCreatorRole()))", "def assert_cannot_view(obj):\n selenium_utils.open_url(obj.url)\n assert ui_utils.is_error_403()", "def test_only_edit_perm(self):\n self.assertStatusCode(self.url, 403)", "def is_admin_context(context):\n if not context:\n warnings.warn(_('Use of empty request context is deprecated'),\n DeprecationWarning)\n raise Exception('die')\n return context.is_admin", "def is_admin_context(context):\n if not context:\n warnings.warn(_('Use of empty request context is deprecated'),\n DeprecationWarning)\n raise Exception('die')\n return context.is_admin", "def is_admin_context(context):\n if not context:\n warnings.warn(_('Use of empty request context is deprecated'),\n DeprecationWarning)\n raise Exception('die')\n return context.is_admin", "def test_no_permission(self):\n self.user.user_permissions.remove(*self.permissions)\n response = self._get()\n self.assertEqual(response.status_code, 302)", "def test_no_permission(self):\n self.user.user_permissions.remove(*self.permissions)\n response = self._get()\n self.assertEqual(response.status_code, 302)" ]
[ "0.6629645", "0.6598161", "0.6393106", "0.6323076", "0.62207663", "0.61628443", "0.61534476", "0.6073732", "0.6038234", "0.6002864", "0.5974506", "0.5970309", "0.5945625", "0.5925215", "0.5923474", "0.5923474", "0.59200865", "0.59187025", "0.5852431", "0.58361256", "0.58302045", "0.5804639", "0.5792666", "0.57858455", "0.57806975", "0.5779022", "0.5779022", "0.5779022", "0.5779022", "0.5776707", "0.57633924", "0.5758291", "0.57559973", "0.5733918", "0.57265055", "0.5722423", "0.57067597", "0.56548685", "0.56503624", "0.56362444", "0.56258905", "0.56250995", "0.56237113", "0.56232876", "0.56222576", "0.5598567", "0.55797267", "0.5578534", "0.5569999", "0.55679536", "0.5564528", "0.55596346", "0.555496", "0.5552815", "0.55501616", "0.55427533", "0.55427533", "0.55427533", "0.55427533", "0.55332214", "0.55332214", "0.5520765", "0.5514656", "0.5514273", "0.5514273", "0.5514273", "0.550591", "0.5504695", "0.55028766", "0.5502578", "0.5500912", "0.5484761", "0.5481855", "0.54806197", "0.547685", "0.5476215", "0.5474986", "0.54727054", "0.5468707", "0.5467437", "0.5467437", "0.5466807", "0.54616535", "0.5457287", "0.5455784", "0.5450277", "0.5449403", "0.54458123", "0.5436179", "0.5432655", "0.5429644", "0.5428079", "0.5425436", "0.54154986", "0.540949", "0.53891754", "0.53891754", "0.53891754", "0.53861225", "0.53861225" ]
0.54502434
86
Tests that an authenticated context (with is_admin set to False) can access an image with is_public set to True.
def test_auth_public(self):
    self.do_visible(True, None, True, tenant='froggy')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_visible(self, exp_res, img_owner, img_public, **kwargs):\n\n img = FakeImage(img_owner, img_public)\n ctx = context.RequestContext(**kwargs)\n\n self.assertEqual(ctx.is_image_visible(img), exp_res)", "def test_admin_public(self):\n with self.login(self.user_admin):\n self.assertTrue(current_user.is_authenticated)\n self.assertEqual(current_user, self.user_admin)\n\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def test_aws_service_api_private_image_get(self):\n pass", "def test_aws_service_api_private_images_get(self):\n pass", "def test_aws_service_api_public_image_get(self):\n pass", "def test_aws_service_api_public_images_get(self):\n pass", "def test_api_thumbnail_retrieve_by_organization_administrator(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=ADMINISTRATOR,\n )\n\n self.assert_user_can_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_auth_private(self):\n self.do_visible(True, None, False, tenant='froggy')", "def test_public_user(self):\n set_permission(Permission.SHARE, self.user1, self.collection)\n\n data = {\"public\": \"view\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"none\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"edit\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"share\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"owner\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_auth_public_owned(self):\n self.do_visible(True, 'pattieblack', True, tenant='pattieblack')", "def test_no_images_unauthorized(self):\n res = self.client.get(IMAGE_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_image_display(self):\n\n result = self.client.get(\"/select_image\")\n\n self.assertIn(b\"/static/uploads/girl-glowing-skin-blue-eyes.jpg\", result.data)", "def test_permissions(self):\n taxonomy = self.get_taxonomy()\n return True if self.request.user == taxonomy.author else taxonomy.public", "def test_get_owner_image(self):\n\n # user1 is owner of image_id 1\n # user2 is owner of image ids (2,3)\n for image_id in range(1, 4):\n url = reverse(self.url_name_one, args=(image_id,))\n if image_id == 1:\n self.client.force_authenticate(self.user1)\n else:\n self.client.force_authenticate(self.user2)\n\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n if image_id == 1:\n self.assertEqual(response.data[\"owner\"], \"user1\")\n else:\n self.assertEqual(response.data[\"owner\"], \"user2\")\n\n # user2 try to get image_id 1 which is owner user1\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.get(url, format=\"json\")\n 
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_public_status_page_add_authorization_to_public_status_page(self):\n pass", "def media_image_remotely_accessible(self) -> bool:\n return True", "def test_filter_public_permissions(self):\n data = {\"public\": \"view\"}\n check_public_permissions(data)\n\n data = {\"public\": \"edit\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"share\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"owner\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)", "def test_anonymous_public(self):\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def test_api_thumbnail_administrator_read_detail_in_read_only(self):\n thumbnail = ThumbnailFactory()\n\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=thumbnail.video.playlist,\n permissions__can_update=False,\n )\n\n response = self.client.get(\n self._get_url(thumbnail.video, thumbnail),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)", "def test_auth_private_owned(self):\n self.do_visible(True, 'pattieblack', False, tenant='pattieblack')", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_logged_in_owner(self):\n self.make_logged_in_owner()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u, album.display_photo, ALBUM_PRIVATE)", "def test_auth_required(self, api_client):\n res = api_client.get(PHOTO_URL)\n\n assert res.status_code == status.HTTP_401_UNAUTHORIZED", "def test_api_thumbnail_retrieve_by_organization_student(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=STUDENT,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_image_no_login(self):\n\n result = self.client.get(\"/select_image\", follow_redirects=True)\n\n self.assertIn(b\"Password\", result.data)", "def do_sharable(self, exp_res, img_owner, membership=None, **kwargs):\n\n img = FakeImage(img_owner, True)\n ctx = context.RequestContext(**kwargs)\n\n sharable_args = {}\n if membership is not None:\n sharable_args['membership'] = membership\n\n self.assertEqual(ctx.is_image_sharable(img, **sharable_args), exp_res)", "def test_admin_accessible(self) -> None:\n response = self.client.get(\"/admin/\")\n self.assertEqual(200, response.status_code)", "def test_api_thumbnail_retrieve_by_playlist_admin(self):\n playlist_access = PlaylistAccessFactory(\n playlist=self.some_video.playlist,\n role=ADMINISTRATOR,\n )\n\n self.assert_user_can_retrieve_thumbnail(\n playlist_access.user, self.some_thumbnail\n )", "def test_public(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_public\": True, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"public\"})", "def test_public(client, url):\n response = 
client.get(url, secure=True)\n assert response.status_code == 200", "def test_api_thumbnail_read_detail_admin_user(self):\n video = VideoFactory(\n uploaded_on=datetime(2018, 8, 8, tzinfo=timezone.utc), upload_state=\"ready\"\n )\n thumbnail = ThumbnailFactory(video=video, upload_state=\"pending\")\n\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=video.playlist,\n roles=[\"administrator\"],\n )\n\n response = self.client.get(\n self._get_url(video, thumbnail),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content)\n\n self.assertEqual(\n content,\n {\n \"id\": str(thumbnail.id),\n \"active_stamp\": None,\n \"is_ready_to_show\": False,\n \"upload_state\": \"pending\",\n \"urls\": None,\n \"video\": str(video.id),\n },\n )", "def test_access_is_password_protected(self):\n\n # Directly create an Upload object in the DB.\n upload = create_url_upload(\"https://google.com/robots.txt\")\n response = self.client.get(reverse(\"upload-access\", args=[upload.id]))\n\n # Ensure request does not error, no special code expected here.\n self.assertEqual(response.status_code, 200)\n\n self.assertContains(response, \"Password\")", "def test_photo_classification_view_set_get_public(self):\n # Test data\n user = account_models.User.objects.get(email='mrtest@mypapaya.io')\n\n photo_models.PhotoClassification.objects.create_or_update(name='City', public=False)\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n request = client.get('/api/photo_classifications')\n results = request.data['results']\n\n self.assertEquals(len(results), 13)", "def testImageHandling(self):\n \n pm = getToolByName(self.portal, 'portal_membership')\n #make sure the person's member portrait isn't defined\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n # Delete the (nonexistant) image, make sure the portrait stays undefined\n self.person.setImage('DELETE_IMAGE')\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n self.person.setImage(TEST_GIF, content_type=\"image/gif\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_GIF)\n # Try to get a 10x10 version of the image\n imageOfSizeTag = self.person.getImageOfSize(10, 10)\n self.failUnlessEqual(imageOfSizeTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"10\" width=\"10\" />')\n self.failUnlessEqual(pm.getPersonalPortrait('abc123').__name__, 'abc123')\n \n # Try to get a scaled-by-ratio image with a width of 100.\n scaledImageTag = self.person.getScaledImageByWidth(100)\n self.failUnlessEqual(scaledImageTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"150\" width=\"100\" />')\n \n # Delete the image, make sure the portrait is deleted as well\n self.person.setImage('DELETE_IMAGE')\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n #self.person.setImage(TEST_JPEG, content_type=\"image/jpeg\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_JPEG)\n \n self.person.setImage(TEST_TIFF, content_type=\"image/tiff\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_TIFF)\n # Try to get a 10x10 version of the image\n imageOfSizeTag = 
self.person.getImageOfSize(10, 10)\n self.failUnlessEqual(imageOfSizeTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"10\" width=\"10\" />')\n \n # Try to get a scaled-by-ratio image with a width of 100.\n # TIFF handling in Plone is broken (probably the fault of PIL), handle the problem nicely.\n scaledImageTag = self.person.getScaledImageByWidth(100)\n self.failUnless(scaledImageTag == '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"150\" width=\"100\" />' or scaledImageTag == '')", "def test_api_thumbnail_instructor_read_detail_in_read_only(self):\n thumbnail = ThumbnailFactory()\n\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=thumbnail.video.playlist,\n permissions__can_update=False,\n )\n\n response = self.client.get(\n self._get_url(thumbnail.video, thumbnail),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)", "def glance_update_and_set_public(glance, image, image_info):\n image_properties = image_info['image_properties']\n try:\n logger.debug(\"glance image update: properties=%s\", image_properties)\n glance.images.update(image.id, **image_properties)\n logger.debug(\"glance image update: visibility=public\")\n glance.images.update(image.id, visibility='public')\n except Exception:\n logger.exception(\"Updating (-> public) Glance image '%s' [%s] failed\", image.name, image.id)\n return 1\n\n return 0", "def test_logged_in_contributor(self):\n self.make_logged_in_contributor()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PRIVATE)", "def test_has_role_public(self):\n self.project.set_public()\n self.assertFalse(self.category.has_role(self.user_bob))\n self.assertTrue(self.project.has_role(self.user_bob))", "def test_api_thumbnail_retrieve_by_organization_instructor(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=INSTRUCTOR,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def test_logged_user_can_access(self):\n\n utils.create_user_and_authenticate(self)\n utils.test_can_access(self, self.url)", "def guest_access(func):\n def decorated(*_, **kwargs):\n public_profiles = current_app.config['USER_PUBLIC_PROFILES']\n if not public_profiles:\n if not current_user.is_authenticated:\n abort(401)\n elif current_user.id != kwargs['id']:\n abort(403)\n return 
func(**kwargs)\n\n return decorated", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def testimage_handler(self):\n\t\t\n\t\tthings = Thing.objects.all()\n\t\tif len( things ):\n\t\t\tthing = things[0]\n\t\telse:\n\t\t\tc = Client()\n\t\t\tdata = parse_qs( 'title=&tags=&lattitude=32.82248&longitude=-96.762986&duration=&parent=&privacy=U&lifespan=&format=txt' )\n\t\t\tdata[ 'media' ] = open( MEDIA_ROOT + 'unittest_image.jpg' )\n\t\t\tc.post( '/api/place/', data )\n\t\t\t\n\t\t\tthing = Thing.objects.all()[0]\n\n\t\t\n\t\turi = thing.media.replace( 'http://' + DOMAIN, '' )\n\t\t\n\t\tc = Client()\n\t\tresponse = c.get( uri )\n\t\tself.failUnlessEqual(response.status_code, 200)", "def test_photos_limited_to_user(self, api_client, test_user):\n\n user2 = get_user_model().objects.create_user(\n 'otheruser@company.com',\n 'password123'\n )\n sample_photo(user=user2)\n sample_photo(user=test_user)\n\n api_client.force_authenticate(test_user)\n res = api_client.get(PHOTO_URL)\n\n photos = Photo.objects.filter(user=test_user)\n serializer = PhotoSerializer(photos, many=True)\n\n assert res.status_code == status.HTTP_200_OK\n assert len(res.data) == 1\n assert res.data == serializer.data", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_document_publicly_accessible(self):\n document = DocumentFactory(\n id=\"301b5f4f-b9f1-4a5f-897d-f8f1bf22c396\",\n playlist__title=\"playlist-003\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n is_public=True,\n title=\"document-001\",\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n uploaded_on=\"2019-09-24 07:24:40+00\",\n )\n\n response = self.client.get(f\"/documents/{document.pk}\")\n\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"<html>\")\n content = response.content.decode(\"utf-8\")\n\n match = re.search(\n '<div id=\"marsha-frontend-data\" data-context=\"(.*)\">', content\n )\n\n context = json.loads(unescape(match.group(1)))\n jwt_token = AccessToken(context.get(\"jwt\"))\n\n self.assertEqual(\n jwt_token.payload[\"permissions\"],\n {\"can_access_dashboard\": False, \"can_update\": False},\n )\n self.assertEqual(context.get(\"state\"), \"success\")\n self.assertEqual(\n context.get(\"resource\"),\n {\n \"active_stamp\": \"1569309880\",\n \"is_ready_to_show\": True,\n \"show_download\": True,\n \"id\": str(document.id),\n \"upload_state\": document.upload_state,\n \"title\": document.title,\n \"extension\": None,\n \"filename\": \"playlist-003_document-001\",\n \"playlist\": {\n \"id\": str(document.playlist.id),\n \"title\": \"playlist-003\",\n \"lti_id\": \"course-v1:ufr+mathematics+00001\",\n },\n \"url\": (\n \"https://abc.cloudfront.net/301b5f4f-b9f1-4a5f-897d-f8f1bf22c396\"\n \"/document/1569309880?response-content-disposition=attachment%3B\"\n \"+filename%3Dplaylist-003_document-001\"\n ),\n },\n )\n self.assertEqual(context.get(\"modelName\"), \"documents\")\n self.assertIsNone(context.get(\"context_id\"))", "def test_get_photos(self):\n recipe = Recipes.objects.create(chef=self.user, draft=False, private=False)\n photo = Photos.objects.create(recipe=recipe, photo_order=1)\n\n url = '/0/chefs/%i/photos' % self.user.pk\n\n resp = self.client.get(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('photos', resp.data)\n self.assertEqual(1, len(resp.data['photos']))\n keys = 
('edit_date', 'creation_date', 'id', u'temperature', 'url', 'recipe', 'cover',\n 'time', 'instructions', 'order', 'quantity')\n self.assertEqual(set(keys), set(resp.data['photos'][0].keys()))", "def test_private(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_public\": False, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"private\"})", "def test_document_not_publicly_accessible(self):\n document = DocumentFactory(\n id=\"301b5f4f-b9f1-4a5f-897d-f8f1bf22c396\",\n playlist__title=\"playlist-003\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n is_public=False,\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n uploaded_on=\"2019-09-24 07:24:40+00\",\n )\n\n response = self.client.get(f\"/documents/{document.pk}\")\n\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"<html>\")\n content = response.content.decode(\"utf-8\")\n\n match = re.search(\n '<div id=\"marsha-frontend-data\" data-context=\"(.*)\">', content\n )\n\n context = json.loads(unescape(match.group(1)))\n\n self.assertIsNone(context.get(\"resource\"))\n self.assertEqual(context.get(\"state\"), \"error\")\n self.assertEqual(context.get(\"modelName\"), \"documents\")", "def test_aws_service_api_image_get(self):\n pass", "def test_anything_else_is_accessible(api_client):\n\n assert api_client().get(\"/anything/else\").status_code == 200", "def test_checkread(self):\n user1 = {'uid': 1, 'gid': 1}\n self.assertTrue(self.m._checkread(user1, {}))\n mock_image = {\n 'userACL': None,\n 'groupACL': None\n }\n # Test a public image with ACLs set to None\n self.assertTrue(self.m._checkread(user1, mock_image))\n # Now empty list instead of None. Treat it the same way.\n mock_image['userACL'] = []\n mock_image['groupACL'] = []\n self.assertTrue(self.m._checkread(user1, mock_image))\n self.assertTrue(self.m._checkread(user1, {'private': False}))\n # Private false should trump other things\n self.assertTrue(self.m._checkread(user1,\n {'private': False, 'userACL': [2]}))\n self.assertTrue(self.m._checkread(user1,\n {'private': False, 'groupACL': [2]}))\n # Now check a protected image that the user should\n # have access to\n mock_image['userACL'] = [1]\n self.assertTrue(self.m._checkread(user1, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 2, 'gid': 1}, mock_image))\n # Now check by groupACL\n mock_image['groupACL'] = [1]\n self.assertTrue(self.m._checkread({'uid': 3, 'gid': 1}, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 3, 'gid': 2}, mock_image))\n # What about an image with a list\n mock_image = {\n 'userACL': [1, 2, 3],\n 'groupACL': [4, 5, 6]\n }\n self.assertTrue(self.m._checkread(user1, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 7, 'gid': 7}, mock_image))", "def test_api_thumbnail_retrieve_by_consumer_site_any_role(self):\n consumer_site_access = ConsumerSiteAccessFactory(\n consumer_site=self.some_video.playlist.consumer_site,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n consumer_site_access.user, self.some_thumbnail\n )", "def test_authorization(self):\n res = self.get(url=\"/products/1/pricehistory\")\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnauthorizedAccess)\n res = self.get(url=\"/products/1/pricehistory\", role=\"user\")\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnauthorizedAccess)", "def test_admin_view_access(request_ctx):\n user = 
User.get(email=\"root@test0.edu\")\n with request_ctx(\"/org_invitatin_summary\") as ctx:\n login_user(user, remember=True)\n rv = ctx.app.full_dispatch_request()\n assert rv.status_code == 200\n assert b\"<!DOCTYPE html>\" in rv.data, \"Expected HTML content\"\n assert b\"Organisation Invitation Summary\" in rv.data\n assert b\"root@test0.edu\" in rv.data", "def test_cant_create_image_unauthorized(self):\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (10, 10))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n res = self.client.post(IMAGE_URL, data={'image': ntf},\n format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_empty_public(self):\n self.do_visible(True, None, True, is_admin=True)", "def test_api_thumbnail_retrieve_by_random_user(self):\n user = UserFactory()\n\n self.assert_user_cannot_retrieve_thumbnail(user, self.some_thumbnail)", "def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_should_render_with_authenticated(self) -> None:\n with override_feature_check(unified_banner_feature.feature_id, False):\n self.request.user = User.objects.get(username='doc')\n self.assertTrue(\n self.action.should_render(\n context=self._create_request_context(\n User.objects.get(username='doc'))))", "def test_user_visibility(app, resource):\n nethz = 'Something'\n with app.user(nethz=nethz):\n # Create fake signup with different nethz\n own = str(app.data.driver.db[resource].insert({'nethz': nethz}))\n other = str(app.data.driver.db[resource].insert({'nethz': 'trolo'}))\n\n # Resource: Can only see own, not both signups\n response = app.client.get('/' + resource, assert_status=200)\n assert len(response['_items']) == 1\n assert response['_items'][0]['nethz'] == nethz\n\n # Items\n own_url = '/%s/%s' % (resource, own)\n other_url = '/%s/%s' % (resource, other)\n\n # Get\n app.client.get(own_url, assert_status=200)\n app.client.get(other_url, assert_status=404)\n\n # Patch (if we can see item, we get 428 since etag is missing)\n app.client.patch(own_url, data={}, assert_status=428)\n app.client.patch(other_url, data={}, assert_status=404)\n\n # Delete (etag missing again)\n app.client.delete(own_url, assert_status=428)\n app.client.delete(other_url, assert_status=404)", "def get_public_images_for_user(username):\n\n user = get_user_by_username(username)\n images = user.images\n public_images = [img for img in images if img.permission.value == \"PUBLIC\"]\n return public_images", "def allowed_to_preview(user):\n if (\n user.is_authenticated and\n user.is_active and\n user.is_staff\n ):\n return True\n return False", "def test_api_thumbnail_read_detail_anonymous(self):\n video = VideoFactory()\n thumbnail = ThumbnailFactory(video=video)\n response = self.client.get(self._get_url(video, thumbnail))\n self.assertEqual(response.status_code, 401)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"Authentication credentials were not provided.\"}\n )", "def test_admin_api_organization_accesses_request_authenticated(self):\n user = factories.UserFactory(is_staff=False, is_superuser=False)\n self.client.login(username=user.username, password=\"password\")\n organization = 
factories.OrganizationFactory()\n response = self.client.get(\n f\"/api/v1.0/admin/organizations/{organization.id}/accesses/\"\n )\n\n self.assertContains(\n response,\n \"You do not have permission to perform this action.\",\n status_code=403,\n )", "def test_detail_is_hacker_permission(self):\n self.user_1.username = 'pythonhacker'\n self.user_1.save()\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def test_get_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_public_status_page_remove_authorization_from_public_status_page(self):\n pass", "def test_user_avatar_serving(self):\n User = get_user_model()\n test_user = User.objects.create_user('Bob', 'bob@bob.com', 'pass123',\n set_default_avatar=True)\n\n avatar_url = reverse('misago:user-avatar', kwargs={\n 'pk': test_user.pk,\n 'hash': test_user.avatar_hash,\n 'size': 150,\n })\n response = self.client.get(avatar_url)\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], 'image/png')", "def is_authorized(self, request, obj=None):\r\n if request.method == 'GET':\r\n return True\r\n else:\r\n return False", "def test_home_as_user(self):\n self.client.login(username=\"test_user_1\", password=\"test\")\n response = self.client.get(\"/images/contents/\")\n self.assertJson(\n response,\n {\n \"files\": [],\n \"folders\": [],\n \"name\": \"\",\n \"path\": \"/\",\n },\n )", "def test_empty_public_owned(self):\n self.do_visible(True, 'pattieblack', True, is_admin=True)", "def test_retrieval_of_user_photos(self):\t\n\t\tget_response = self.client.get(reverse('photos'))\n\n\t\tself.assertEqual(get_response.status_code, status.HTTP_200_OK)\n\t\tdata = [i.values() for i in get_response.data]\n\t\tself.assertIn(u'{}'.format(self.image_name), data[0])", "async def test___init__(image_config: ImageConfig, image_config_signed: ImageConfig):\n assert image_config\n assert image_config_signed", "def is_authenticated(self):\n return True", "def test_logged_user_can_access(self):\n\n utils.create_user_and_authenticate(self)\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "def test_logged_in_not_friend(self):\n\n # log in\n self.make_logged_in_not_friend()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PUBLIC)", "def test_pull_public_acl(self):\n # Use defaults for format, arch, os, ostcount, replication\n pr = {\n 'system': self.system,\n 'itype': self.itype,\n 'tag': self.tag,\n 'remotetype': 'dockerv2',\n 'userACL': [1001, 1002],\n 'groupACL': [1003, 1004]\n 
}\n # Do the pull\n session = self.m.new_session(self.auth, self.system)\n rec = self.m.pull(session, pr) # ,delay=False)\n id = rec['_id']\n self.assertIsNotNone(rec)\n # Confirm record\n q = {'system': self.system, 'itype': self.itype,\n 'pulltag': self.tag}\n state = self.time_wait(id)\n mrec = self.images.find_one(q)\n self.assertIn('_id', mrec)\n self.assertIn('userACL', mrec)\n self.assertIn('ENV', mrec)\n # Track through transistions\n state = self.time_wait(id)\n self.assertEqual(state, 'READY')\n mrec = self.images.find_one(q)\n self.assertIn('ENV', mrec)\n self.assertIn('private', mrec)\n self.assertFalse(mrec['private'])", "def test_url_is_accessible_to_externals(self):\n\n res = self.client.get(self.url)\n self.assertEqual(res.status_code, 200)\n\n self.user.is_external = False\n self.user.save()\n res = self.client.get(self.url)\n self.assertEqual(res.status_code, 404)", "def test_edit_image_instance(self):\n self.client.force_authenticate(self.user1)\n data = {\n \"img_name\": \"photo_user1\",\n \"img_description\": \"photo of user1\",\n \"favourite\": True,\n \"width\": 700,\n \"height\": 500,\n \"share_user\": [],\n }\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.put(url, data, format=\"multipart\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # Get edited object, convert to dict and compare with inputs\n obj = model_to_dict(Images.objects.get(id=1))\n for field, edited_data in data.items():\n self.assertEqual(edited_data, obj[field])\n # Check if image was edited to a new input\n edited_img = Image.open(self.test_pic_folder + \"/test.png\")\n self.assertEqual(edited_img.size, (700, 500))", "def test_allowed_if_superuser(self):\n\n @task_or_superuser_only\n def view(request):\n return HttpResponse(\"Hello\")\n\n class User(object):\n is_superuser = True\n is_authenticated = True\n\n request = self.factory.get(\"/\")\n request.user = None\n response = view(request)\n self.assertEqual(response.status_code, 403)\n\n request.user = User()\n response = view(request)\n self.assertEqual(response.status_code, 200)", "def test_admin_can_login_to_web_portal(admin):", "def test_has_object_read_permission_private(\n mock_parent_permission, api_rf, profile_factory\n):\n profile = profile_factory(is_private=True)\n request = api_rf.get(\"/\")\n\n expected = mock_parent_permission.return_value\n\n assert profile.has_object_read_permission(request) == expected\n assert mock_parent_permission.call_count == 1\n assert mock_parent_permission.call_args[0] == (request,)", "def test_get_public_guest_access(self):\n self.project.public_guest_access = True\n self.project.save()\n user_new = self.make_user('user_new')\n url = reverse('projectroles:api_project_list')\n response = self.request_knox(url, token=self.get_token(user_new))\n self.assertEqual(response.status_code, 200)\n response_data = json.loads(response.content)\n self.assertEqual(len(response_data), 1)\n self.assertEqual(\n response_data[0]['sodar_uuid'], str(self.project.sodar_uuid)\n )", "def test_default_publish(self):\n self.assertIs(self.photo.published, 'Public')", "def test_get_reusableitem_api_public(self):\n\n self.reusableitem_1.is_public = True\n self.reusableitem_1.save()\n\n self.client.logout()\n\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_list_image(self):\n pass", "def public_resource():\n return create_response(\n status_value=True,\n code=200,\n message=\"You have access the public 
resource\"\n )", "def test_requires_auth(client, admin_client, url):\n response = client.get(url, secure=True)\n assert response.status_code == 302\n response = admin_client.get(url, secure=True)\n assert response.status_code == 200", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.global_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in templates))", "def test_un_logged_in_user_can_not_upload_picture(self):\n tmp_file = generate_image_for_testing()\n response = self.client.post(self.user_passport_url,\n data={'passport': tmp_file})\n\n self.assertEqual(403, response.status_code)", "def test_image_import(self):\r\n module_store = modulestore('direct')\r\n\r\n content_store = contentstore()\r\n\r\n # Use conditional_and_poll, as it's got an image already\r\n import_from_xml(\r\n module_store,\r\n 'common/test/data/',\r\n ['conditional_and_poll'],\r\n static_content_store=content_store\r\n )\r\n\r\n course = module_store.get_courses()[0]\r\n\r\n # Make sure the course image is set to the right place\r\n self.assertEqual(course.course_image, 'images_course_image.jpg')\r\n\r\n # Ensure that the imported course image is present -- this shouldn't raise an exception\r\n asset_key = course.id.make_asset_key('asset', course.course_image)\r\n content_store.find(asset_key)", "def test_permission(self):\n response = self._get()\n self.assertEqual(response.status_code, 200)", "def test_permission(self):\n response = self._get()\n self.assertEqual(response.status_code, 200)", "def test_empty_private(self):\n self.do_visible(True, None, False, is_admin=True)", "def test_04_admin_featured_apps_as_anonymous(self):\r\n res = self.app.get('/admin/featured', follow_redirects=True)\r\n assert \"Please sign in to access this page\" in res.data, res.data", "def test_page_view_permission(self):\n \n adminonlypage = create_page_in_admin(self.testproject,\"adminonlypage\",\n permission_lvl=Page.ADMIN_ONLY) \n registeredonlypage = create_page_in_admin(self.testproject,\"registeredonlypage\",\n permission_lvl=Page.REGISTERED_ONLY)\n publicpage = create_page_in_admin(self.testproject,\"publicpage\",\n permission_lvl=Page.ALL)\n \n self._test_page_can_be_viewed(self.projectadmin,adminonlypage)\n self._test_page_can_not_be_viewed(self.participant,adminonlypage)\n self._test_page_can_not_be_viewed(self.registered_user,adminonlypage) \n self._test_page_can_not_be_viewed(None,adminonlypage) # None = not logged in\n \n self._test_page_can_be_viewed(self.projectadmin,registeredonlypage)\n self._test_page_can_be_viewed(self.participant,registeredonlypage)\n self._test_page_can_not_be_viewed(self.registered_user,registeredonlypage)\n self._test_page_can_not_be_viewed(None,registeredonlypage) # None = not logged in\n \n self._test_page_can_be_viewed(self.projectadmin,publicpage)\n self._test_page_can_be_viewed(self.participant,publicpage)\n self._test_page_can_be_viewed(self.registered_user,publicpage)\n self._test_page_can_be_viewed(None,publicpage) # None = not logged in" ]
[ "0.7359513", "0.6947033", "0.6891199", "0.6887777", "0.67541796", "0.66183466", "0.65862226", "0.6581423", "0.64067996", "0.6383469", "0.6383469", "0.63796556", "0.6267833", "0.6206277", "0.61293125", "0.612802", "0.6117827", "0.61051565", "0.6098563", "0.6088459", "0.60742253", "0.6048395", "0.60339636", "0.6027125", "0.60144794", "0.5943688", "0.5942696", "0.591928", "0.59065676", "0.59027827", "0.58877796", "0.5880767", "0.5836869", "0.5796547", "0.5775495", "0.57685065", "0.57639885", "0.575522", "0.57524526", "0.57454515", "0.5744631", "0.57330185", "0.57017374", "0.56966805", "0.5691735", "0.5681333", "0.5678797", "0.5675532", "0.567521", "0.56739104", "0.56715965", "0.56422913", "0.56312007", "0.56182766", "0.56161785", "0.5589359", "0.55864376", "0.5580648", "0.55797046", "0.55784756", "0.5569853", "0.5566717", "0.5564384", "0.5563484", "0.5558437", "0.5542863", "0.5541235", "0.55380607", "0.5533856", "0.5531754", "0.55137897", "0.5508075", "0.5496846", "0.54833317", "0.5479099", "0.54759395", "0.54639894", "0.5461758", "0.5456973", "0.5446684", "0.5441488", "0.54376006", "0.54321635", "0.54321367", "0.5426156", "0.54259753", "0.5423946", "0.5420913", "0.5412646", "0.54120225", "0.54115784", "0.54109335", "0.54059553", "0.53993136", "0.5398721", "0.5386683", "0.5386683", "0.5382808", "0.53814715", "0.5380295" ]
0.6749067
5
Tests that an authenticated context (with is_admin set to False) can access an image (which it does not own) with is_public set to True.
def test_auth_public_unowned(self): self.do_visible(True, 'pattieblack', True, tenant='froggy')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_visible(self, exp_res, img_owner, img_public, **kwargs):\n\n img = FakeImage(img_owner, img_public)\n ctx = context.RequestContext(**kwargs)\n\n self.assertEqual(ctx.is_image_visible(img), exp_res)", "def test_aws_service_api_private_images_get(self):\n pass", "def test_aws_service_api_private_image_get(self):\n pass", "def test_no_images_unauthorized(self):\n res = self.client.get(IMAGE_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_aws_service_api_public_image_get(self):\n pass", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_admin_public(self):\n with self.login(self.user_admin):\n self.assertTrue(current_user.is_authenticated)\n self.assertEqual(current_user, self.user_admin)\n\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def test_auth_private(self):\n self.do_visible(True, None, False, tenant='froggy')", "def test_aws_service_api_public_images_get(self):\n pass", "def test_auth_public(self):\n self.do_visible(True, None, True, tenant='froggy')", "def test_api_thumbnail_retrieve_by_organization_administrator(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=ADMINISTRATOR,\n )\n\n self.assert_user_can_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_get_owner_image(self):\n\n # user1 is owner of image_id 1\n # user2 is owner of image ids (2,3)\n for image_id in range(1, 4):\n url = reverse(self.url_name_one, args=(image_id,))\n if image_id == 1:\n self.client.force_authenticate(self.user1)\n else:\n self.client.force_authenticate(self.user2)\n\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n if image_id == 1:\n self.assertEqual(response.data[\"owner\"], \"user1\")\n else:\n self.assertEqual(response.data[\"owner\"], \"user2\")\n\n # user2 try to get image_id 1 which is owner user1\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def media_image_remotely_accessible(self) -> bool:\n return True", "def test_auth_public_owned(self):\n self.do_visible(True, 'pattieblack', True, tenant='pattieblack')", "def test_image_no_login(self):\n\n result = self.client.get(\"/select_image\", follow_redirects=True)\n\n self.assertIn(b\"Password\", result.data)", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def test_public_user(self):\n set_permission(Permission.SHARE, self.user1, self.collection)\n\n data = {\"public\": \"view\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"none\"}\n resp = 
self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"edit\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"share\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"owner\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def test_auth_private_owned(self):\n self.do_visible(True, 'pattieblack', False, tenant='pattieblack')", "def test_filter_public_permissions(self):\n data = {\"public\": \"view\"}\n check_public_permissions(data)\n\n data = {\"public\": \"edit\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"share\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"owner\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)", "def test_auth_required(self, api_client):\n res = api_client.get(PHOTO_URL)\n\n assert res.status_code == status.HTTP_401_UNAUTHORIZED", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_image_display(self):\n\n result = self.client.get(\"/select_image\")\n\n self.assertIn(b\"/static/uploads/girl-glowing-skin-blue-eyes.jpg\", result.data)", "def test_api_thumbnail_administrator_read_detail_in_read_only(self):\n thumbnail = ThumbnailFactory()\n\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=thumbnail.video.playlist,\n permissions__can_update=False,\n )\n\n response = self.client.get(\n self._get_url(thumbnail.video, thumbnail),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)", "def test_logged_in_owner(self):\n self.make_logged_in_owner()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u, album.display_photo, ALBUM_PRIVATE)", "def test_cant_create_image_unauthorized(self):\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (10, 10))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n res = self.client.post(IMAGE_URL, data={'image': ntf},\n format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_permissions(self):\n taxonomy = self.get_taxonomy()\n return True if self.request.user == taxonomy.author else taxonomy.public", "def test_un_logged_in_user_can_not_upload_picture(self):\n tmp_file = generate_image_for_testing()\n response = self.client.post(self.user_passport_url,\n data={'passport': tmp_file})\n\n self.assertEqual(403, response.status_code)", "def test_public_status_page_add_authorization_to_public_status_page(self):\n pass", "def do_sharable(self, exp_res, img_owner, 
membership=None, **kwargs):\n\n img = FakeImage(img_owner, True)\n ctx = context.RequestContext(**kwargs)\n\n sharable_args = {}\n if membership is not None:\n sharable_args['membership'] = membership\n\n self.assertEqual(ctx.is_image_sharable(img, **sharable_args), exp_res)", "def test_anonymous_public(self):\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def test_api_thumbnail_retrieve_by_organization_student(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=STUDENT,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def testImageHandling(self):\n \n pm = getToolByName(self.portal, 'portal_membership')\n #make sure the person's member portrait isn't defined\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n # Delete the (nonexistant) image, make sure the portrait stays undefined\n self.person.setImage('DELETE_IMAGE')\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n self.person.setImage(TEST_GIF, content_type=\"image/gif\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_GIF)\n # Try to get a 10x10 version of the image\n imageOfSizeTag = self.person.getImageOfSize(10, 10)\n self.failUnlessEqual(imageOfSizeTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"10\" width=\"10\" />')\n self.failUnlessEqual(pm.getPersonalPortrait('abc123').__name__, 'abc123')\n \n # Try to get a scaled-by-ratio image with a width of 100.\n scaledImageTag = self.person.getScaledImageByWidth(100)\n self.failUnlessEqual(scaledImageTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"150\" width=\"100\" />')\n \n # Delete the image, make sure the portrait is deleted as well\n self.person.setImage('DELETE_IMAGE')\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n #self.person.setImage(TEST_JPEG, content_type=\"image/jpeg\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_JPEG)\n \n self.person.setImage(TEST_TIFF, content_type=\"image/tiff\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_TIFF)\n # Try to get a 10x10 version of the image\n imageOfSizeTag = self.person.getImageOfSize(10, 10)\n self.failUnlessEqual(imageOfSizeTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"10\" width=\"10\" />')\n \n # Try to get a scaled-by-ratio image with a width of 100.\n # TIFF handling in Plone is broken (probably the fault of PIL), handle the problem nicely.\n scaledImageTag = self.person.getScaledImageByWidth(100)\n self.failUnless(scaledImageTag == '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"150\" width=\"100\" />' or scaledImageTag == '')", "def test_get_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_logged_in_not_friend(self):\n\n # log in\n self.make_logged_in_not_friend()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, 
self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PUBLIC)", "def test_document_not_publicly_accessible(self):\n document = DocumentFactory(\n id=\"301b5f4f-b9f1-4a5f-897d-f8f1bf22c396\",\n playlist__title=\"playlist-003\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n is_public=False,\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n uploaded_on=\"2019-09-24 07:24:40+00\",\n )\n\n response = self.client.get(f\"/documents/{document.pk}\")\n\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"<html>\")\n content = response.content.decode(\"utf-8\")\n\n match = re.search(\n '<div id=\"marsha-frontend-data\" data-context=\"(.*)\">', content\n )\n\n context = json.loads(unescape(match.group(1)))\n\n self.assertIsNone(context.get(\"resource\"))\n self.assertEqual(context.get(\"state\"), \"error\")\n self.assertEqual(context.get(\"modelName\"), \"documents\")", "def test_private(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_public\": False, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"private\"})", "def test_api_thumbnail_instructor_read_detail_in_read_only(self):\n thumbnail = ThumbnailFactory()\n\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=thumbnail.video.playlist,\n permissions__can_update=False,\n )\n\n response = self.client.get(\n self._get_url(thumbnail.video, thumbnail),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)", "def test_photos_limited_to_user(self, api_client, test_user):\n\n user2 = get_user_model().objects.create_user(\n 'otheruser@company.com',\n 'password123'\n )\n sample_photo(user=user2)\n sample_photo(user=test_user)\n\n api_client.force_authenticate(test_user)\n res = api_client.get(PHOTO_URL)\n\n photos = Photo.objects.filter(user=test_user)\n serializer = PhotoSerializer(photos, many=True)\n\n assert res.status_code == status.HTTP_200_OK\n assert len(res.data) == 1\n assert res.data == serializer.data", "def test_public_status_page_remove_authorization_from_public_status_page(self):\n pass", "def test_admin_accessible(self) -> None:\n response = self.client.get(\"/admin/\")\n self.assertEqual(200, response.status_code)", "def guest_access(func):\n def decorated(*_, **kwargs):\n public_profiles = current_app.config['USER_PUBLIC_PROFILES']\n if not public_profiles:\n if not current_user.is_authenticated:\n abort(401)\n elif current_user.id != kwargs['id']:\n abort(403)\n return func(**kwargs)\n\n return decorated", "def test_access_is_password_protected(self):\n\n # Directly create an Upload object in the DB.\n upload = create_url_upload(\"https://google.com/robots.txt\")\n response = self.client.get(reverse(\"upload-access\", args=[upload.id]))\n\n # Ensure request does not error, no special code expected here.\n self.assertEqual(response.status_code, 200)\n\n self.assertContains(response, \"Password\")", "def test_public(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_public\": True, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": 
\"public\"})", "def test_api_thumbnail_retrieve_by_organization_instructor(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=INSTRUCTOR,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_api_thumbnail_retrieve_by_playlist_admin(self):\n playlist_access = PlaylistAccessFactory(\n playlist=self.some_video.playlist,\n role=ADMINISTRATOR,\n )\n\n self.assert_user_can_retrieve_thumbnail(\n playlist_access.user, self.some_thumbnail\n )", "def glance_update_and_set_public(glance, image, image_info):\n image_properties = image_info['image_properties']\n try:\n logger.debug(\"glance image update: properties=%s\", image_properties)\n glance.images.update(image.id, **image_properties)\n logger.debug(\"glance image update: visibility=public\")\n glance.images.update(image.id, visibility='public')\n except Exception:\n logger.exception(\"Updating (-> public) Glance image '%s' [%s] failed\", image.name, image.id)\n return 1\n\n return 0", "def test_get_photos(self):\n recipe = Recipes.objects.create(chef=self.user, draft=False, private=False)\n photo = Photos.objects.create(recipe=recipe, photo_order=1)\n\n url = '/0/chefs/%i/photos' % self.user.pk\n\n resp = self.client.get(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('photos', resp.data)\n self.assertEqual(1, len(resp.data['photos']))\n keys = ('edit_date', 'creation_date', 'id', u'temperature', 'url', 'recipe', 'cover',\n 'time', 'instructions', 'order', 'quantity')\n self.assertEqual(set(keys), set(resp.data['photos'][0].keys()))", "def test_anything_else_is_accessible(api_client):\n\n assert api_client().get(\"/anything/else\").status_code == 200", "def test_checkread(self):\n user1 = {'uid': 1, 'gid': 1}\n self.assertTrue(self.m._checkread(user1, {}))\n mock_image = {\n 'userACL': None,\n 'groupACL': None\n }\n # Test a public image with ACLs set to None\n self.assertTrue(self.m._checkread(user1, mock_image))\n # Now empty list instead of None. 
Treat it the same way.\n mock_image['userACL'] = []\n mock_image['groupACL'] = []\n self.assertTrue(self.m._checkread(user1, mock_image))\n self.assertTrue(self.m._checkread(user1, {'private': False}))\n # Private false should trump other things\n self.assertTrue(self.m._checkread(user1,\n {'private': False, 'userACL': [2]}))\n self.assertTrue(self.m._checkread(user1,\n {'private': False, 'groupACL': [2]}))\n # Now check a protected image that the user should\n # have access to\n mock_image['userACL'] = [1]\n self.assertTrue(self.m._checkread(user1, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 2, 'gid': 1}, mock_image))\n # Now check by groupACL\n mock_image['groupACL'] = [1]\n self.assertTrue(self.m._checkread({'uid': 3, 'gid': 1}, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 3, 'gid': 2}, mock_image))\n # What about an image with a list\n mock_image = {\n 'userACL': [1, 2, 3],\n 'groupACL': [4, 5, 6]\n }\n self.assertTrue(self.m._checkread(user1, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 7, 'gid': 7}, mock_image))", "def test_api_thumbnail_retrieve_by_random_user(self):\n user = UserFactory()\n\n self.assert_user_cannot_retrieve_thumbnail(user, self.some_thumbnail)", "def test_empty_public_owned(self):\n self.do_visible(True, 'pattieblack', True, is_admin=True)", "def test_api_thumbnail_retrieve_by_consumer_site_any_role(self):\n consumer_site_access = ConsumerSiteAccessFactory(\n consumer_site=self.some_video.playlist.consumer_site,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n consumer_site_access.user, self.some_thumbnail\n )", "def testimage_handler(self):\n\t\t\n\t\tthings = Thing.objects.all()\n\t\tif len( things ):\n\t\t\tthing = things[0]\n\t\telse:\n\t\t\tc = Client()\n\t\t\tdata = parse_qs( 'title=&tags=&lattitude=32.82248&longitude=-96.762986&duration=&parent=&privacy=U&lifespan=&format=txt' )\n\t\t\tdata[ 'media' ] = open( MEDIA_ROOT + 'unittest_image.jpg' )\n\t\t\tc.post( '/api/place/', data )\n\t\t\t\n\t\t\tthing = Thing.objects.all()[0]\n\n\t\t\n\t\turi = thing.media.replace( 'http://' + DOMAIN, '' )\n\t\t\n\t\tc = Client()\n\t\tresponse = c.get( uri )\n\t\tself.failUnlessEqual(response.status_code, 200)", "def test_get_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_photo_classification_view_set_get_public(self):\n # Test data\n user = account_models.User.objects.get(email='mrtest@mypapaya.io')\n\n photo_models.PhotoClassification.objects.create_or_update(name='City', public=False)\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n request = client.get('/api/photo_classifications')\n results = request.data['results']\n\n self.assertEquals(len(results), 13)", "def test_user_visibility(app, resource):\n nethz = 'Something'\n with app.user(nethz=nethz):\n # Create fake signup with different nethz\n own = 
str(app.data.driver.db[resource].insert({'nethz': nethz}))\n other = str(app.data.driver.db[resource].insert({'nethz': 'trolo'}))\n\n # Resource: Can only see own, not both signups\n response = app.client.get('/' + resource, assert_status=200)\n assert len(response['_items']) == 1\n assert response['_items'][0]['nethz'] == nethz\n\n # Items\n own_url = '/%s/%s' % (resource, own)\n other_url = '/%s/%s' % (resource, other)\n\n # Get\n app.client.get(own_url, assert_status=200)\n app.client.get(other_url, assert_status=404)\n\n # Patch (if we can see item, we get 428 since etag is missing)\n app.client.patch(own_url, data={}, assert_status=428)\n app.client.patch(other_url, data={}, assert_status=404)\n\n # Delete (etag missing again)\n app.client.delete(own_url, assert_status=428)\n app.client.delete(other_url, assert_status=404)", "def submit_image_no_login(self):\n\n result = self.client.get(\"/submit_image\", follow_redirects=True)\n self.assertIn(b\"Password\", result.data)", "def test_detail_is_hacker_permission(self):\n self.user_1.username = 'pythonhacker'\n self.user_1.save()\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_aws_service_api_image_get(self):\n pass", "def test_read_unauthorized(self):\n\n url = reverse('file', kwargs={'file_id': str(self.file.id)})\n\n data = {}\n\n self.client.force_authenticate(user=self.test_user2_obj)\n response = self.client.get(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_empty_public(self):\n self.do_visible(True, None, True, is_admin=True)", "def test_public(client, url):\n response = client.get(url, secure=True)\n assert response.status_code == 200", "def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_unauthenticated_get(self):\n url = reverse('edit-list')\n\n response = self.client.get(url)\n self.assertEqual(403, response.status_code)\n self.assertEqual('Forbidden', response.status_text)\n self.assertTrue(\n 'credentials were not provided.' 
in response.data.get('detail'))", "def test_has_role_public(self):\n self.project.set_public()\n self.assertFalse(self.category.has_role(self.user_bob))\n self.assertTrue(self.project.has_role(self.user_bob))", "def test_should_render_for_owner_unpublished(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context(\n public=False)))", "def test_api_thumbnail_read_detail_admin_user(self):\n video = VideoFactory(\n uploaded_on=datetime(2018, 8, 8, tzinfo=timezone.utc), upload_state=\"ready\"\n )\n thumbnail = ThumbnailFactory(video=video, upload_state=\"pending\")\n\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=video.playlist,\n roles=[\"administrator\"],\n )\n\n response = self.client.get(\n self._get_url(video, thumbnail),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content)\n\n self.assertEqual(\n content,\n {\n \"id\": str(thumbnail.id),\n \"active_stamp\": None,\n \"is_ready_to_show\": False,\n \"upload_state\": \"pending\",\n \"urls\": None,\n \"video\": str(video.id),\n },\n )", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.global_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in templates))", "def test_logged_in_contributor(self):\n self.make_logged_in_contributor()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PRIVATE)", "def test_api_thumbnail_read_detail_anonymous(self):\n video = VideoFactory()\n thumbnail = ThumbnailFactory(video=video)\n response = self.client.get(self._get_url(video, thumbnail))\n self.assertEqual(response.status_code, 401)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"Authentication credentials were not provided.\"}\n )", "def test_get_user_template_as_anonymous_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_authorization(self):\n res = self.get(url=\"/products/1/pricehistory\")\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnauthorizedAccess)\n res = self.get(url=\"/products/1/pricehistory\", role=\"user\")\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnauthorizedAccess)", "def test_empty_private(self):\n self.do_visible(True, None, False, is_admin=True)", "def test_url_is_accessible_to_externals(self):\n\n res = self.client.get(self.url)\n self.assertEqual(res.status_code, 200)\n\n self.user.is_external = False\n self.user.save()\n res = self.client.get(self.url)\n self.assertEqual(res.status_code, 404)", "def test_should_render_with_authenticated(self) -> None:\n with 
override_feature_check(unified_banner_feature.feature_id, False):\n self.request.user = User.objects.get(username='doc')\n self.assertTrue(\n self.action.should_render(\n context=self._create_request_context(\n User.objects.get(username='doc'))))", "def test_read_unauthenticated(self):\n\n url = reverse('file', kwargs={'file_id': str(self.file.id)})\n\n data = {}\n\n response = self.client.get(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_not_logged_in(self):\n response = self.c.get(reverse(submit_point), {'lat': 34.0, 'lng': 45.3, 'zoom': 13})\n self.assertEqual(response.status_code, 403)", "def test_get_reusableitem_api_not_public(self):\n\n # user not logged in\n self.client.logout()\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n # user logged in and created the Reusable Item\n self.client.force_authenticate(user=self.user_1)\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # user logged in and did not create the Reusable Item\n self.client.logout()\n self.client.force_authenticate(user=self.user_2)\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_should_render_with_user_in_read_only(self) -> None:\n self.request.user = User.objects.get(username='doc')\n\n # Turning on read-only mode prevents creation of some objects so call\n # _create_request_context first.\n request_context = self._create_request_context(user=self.request.user)\n\n settings = {\n 'site_read_only': True,\n }\n\n with override_feature_check(unified_banner_feature.feature_id, False):\n with self.siteconfig_settings(settings):\n if getattr(self, 'read_only_always_show', False):\n self.assertTrue(\n self.action.should_render(context=request_context))\n else:\n self.assertFalse(\n self.action.should_render(context=request_context))", "def test_allowed_if_superuser(self):\n\n @task_or_superuser_only\n def view(request):\n return HttpResponse(\"Hello\")\n\n class User(object):\n is_superuser = True\n is_authenticated = True\n\n request = self.factory.get(\"/\")\n request.user = None\n response = view(request)\n self.assertEqual(response.status_code, 403)\n\n request.user = User()\n response = view(request)\n self.assertEqual(response.status_code, 200)", "def test_empty_private_owned(self):\n self.do_visible(True, 'pattieblack', False, is_admin=True)", "def test_pull_public_acl(self):\n # Use defaults for format, arch, os, ostcount, replication\n pr = {\n 'system': self.system,\n 'itype': self.itype,\n 'tag': self.tag,\n 'remotetype': 'dockerv2',\n 'userACL': [1001, 1002],\n 'groupACL': [1003, 1004]\n }\n # Do the pull\n session = self.m.new_session(self.auth, self.system)\n rec = self.m.pull(session, pr) # ,delay=False)\n id = rec['_id']\n self.assertIsNotNone(rec)\n # Confirm record\n q = {'system': self.system, 'itype': self.itype,\n 'pulltag': self.tag}\n state = self.time_wait(id)\n mrec = self.images.find_one(q)\n self.assertIn('_id', mrec)\n self.assertIn('userACL', mrec)\n self.assertIn('ENV', mrec)\n # Track through transistions\n state = self.time_wait(id)\n self.assertEqual(state, 'READY')\n mrec = self.images.find_one(q)\n self.assertIn('ENV', mrec)\n self.assertIn('private', mrec)\n self.assertFalse(mrec['private'])", "def test_non_owner_authenticated_user_read_given_blogpost_hidden_app(self):\r\n\r\n 
with self.flask_app.test_request_context('/'):\r\n app = AppFactory.create(hidden=1)\r\n user = UserFactory.create()\r\n blogpost = BlogpostFactory.create(app=app)\r\n\r\n assert self.mock_authenticated.id != app.owner.id\r\n assert_raises(Forbidden, getattr(require, 'blogpost').read, blogpost)", "def test_anon_public_owned(self):\n self.do_visible(True, 'pattieblack', True)", "def test_has_object_read_permission_private(\n mock_parent_permission, api_rf, profile_factory\n):\n profile = profile_factory(is_private=True)\n request = api_rf.get(\"/\")\n\n expected = mock_parent_permission.return_value\n\n assert profile.has_object_read_permission(request) == expected\n assert mock_parent_permission.call_count == 1\n assert mock_parent_permission.call_args[0] == (request,)", "def is_authorized(self, request, obj=None):\r\n if request.method == 'GET':\r\n return True\r\n else:\r\n return False", "def test_04_admin_featured_apps_as_anonymous(self):\r\n res = self.app.get('/admin/featured', follow_redirects=True)\r\n assert \"Please sign in to access this page\" in res.data, res.data", "def test_if_forbiden_for_authenticated_permissions(self):\r\n res = self.client_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def test_logged_user_can_access(self):\n\n utils.create_user_and_authenticate(self)\n utils.test_can_access(self, self.url)", "def test_only_edit_perm(self):\n self.assertStatusCode(self.url, 403)", "def test_document_publicly_accessible(self):\n document = DocumentFactory(\n id=\"301b5f4f-b9f1-4a5f-897d-f8f1bf22c396\",\n playlist__title=\"playlist-003\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n is_public=True,\n title=\"document-001\",\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n uploaded_on=\"2019-09-24 07:24:40+00\",\n )\n\n response = self.client.get(f\"/documents/{document.pk}\")\n\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"<html>\")\n content = response.content.decode(\"utf-8\")\n\n match = re.search(\n '<div id=\"marsha-frontend-data\" data-context=\"(.*)\">', content\n )\n\n context = json.loads(unescape(match.group(1)))\n jwt_token = AccessToken(context.get(\"jwt\"))\n\n self.assertEqual(\n jwt_token.payload[\"permissions\"],\n {\"can_access_dashboard\": False, \"can_update\": False},\n )\n self.assertEqual(context.get(\"state\"), \"success\")\n self.assertEqual(\n context.get(\"resource\"),\n {\n \"active_stamp\": \"1569309880\",\n \"is_ready_to_show\": True,\n \"show_download\": True,\n \"id\": str(document.id),\n \"upload_state\": document.upload_state,\n \"title\": document.title,\n \"extension\": None,\n \"filename\": \"playlist-003_document-001\",\n \"playlist\": {\n \"id\": str(document.playlist.id),\n \"title\": \"playlist-003\",\n \"lti_id\": \"course-v1:ufr+mathematics+00001\",\n },\n \"url\": (\n \"https://abc.cloudfront.net/301b5f4f-b9f1-4a5f-897d-f8f1bf22c396\"\n \"/document/1569309880?response-content-disposition=attachment%3B\"\n \"+filename%3Dplaylist-003_document-001\"\n ),\n },\n )\n self.assertEqual(context.get(\"modelName\"), \"documents\")\n self.assertIsNone(context.get(\"context_id\"))", "def test_get_global_template_as_anonymous_with_access_right_returns_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n template = template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.global_template)", "def 
create_image_allowed(self):\n return self._create_image_allowed", "def test_auth_required(self, api_client):\n res = api_client.get(PHOTO_ALBUM_URL)\n\n assert res.status_code == status.HTTP_401_UNAUTHORIZED", "def test_admin_api_organization_accesses_request_authenticated(self):\n user = factories.UserFactory(is_staff=False, is_superuser=False)\n self.client.login(username=user.username, password=\"password\")\n organization = factories.OrganizationFactory()\n response = self.client.get(\n f\"/api/v1.0/admin/organizations/{organization.id}/accesses/\"\n )\n\n self.assertContains(\n response,\n \"You do not have permission to perform this action.\",\n status_code=403,\n )" ]
[ "0.7225246", "0.7020056", "0.7001125", "0.6687295", "0.6627152", "0.6603058", "0.6603058", "0.6587212", "0.6518558", "0.6515378", "0.64673233", "0.63507557", "0.63226664", "0.63195735", "0.6267392", "0.6243786", "0.6220447", "0.62151307", "0.6181711", "0.61252385", "0.6124277", "0.6107395", "0.6100652", "0.608461", "0.6070133", "0.60595113", "0.60412824", "0.6016835", "0.59481746", "0.5940368", "0.5937758", "0.59255695", "0.5867598", "0.58601004", "0.58258694", "0.5820948", "0.5785316", "0.5784658", "0.57730377", "0.57698166", "0.57608867", "0.5758756", "0.5757796", "0.57061946", "0.5703367", "0.57011163", "0.5696043", "0.56893814", "0.5687013", "0.56859505", "0.56790173", "0.5676589", "0.56765586", "0.567448", "0.56560594", "0.5653531", "0.5650866", "0.56500506", "0.5648371", "0.5644424", "0.56409544", "0.5640917", "0.5640917", "0.56396437", "0.5637747", "0.56248635", "0.5615651", "0.56085056", "0.56057894", "0.5605576", "0.5595851", "0.5582555", "0.55769324", "0.5575957", "0.55701506", "0.5570109", "0.5556163", "0.55539834", "0.55466837", "0.5544485", "0.5544093", "0.55382884", "0.5532013", "0.5508954", "0.55006886", "0.5493655", "0.5491136", "0.5486884", "0.5480527", "0.5480154", "0.5475205", "0.5473081", "0.5472612", "0.54703176", "0.5469416", "0.5467387", "0.5461067", "0.5458984", "0.5455204", "0.5454569" ]
0.6331947
12
Tests that an authenticated context (with is_admin set to False) can access an image (which it does own) with is_public set to True.
def test_auth_public_owned(self): self.do_visible(True, 'pattieblack', True, tenant='pattieblack')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_visible(self, exp_res, img_owner, img_public, **kwargs):\n\n img = FakeImage(img_owner, img_public)\n ctx = context.RequestContext(**kwargs)\n\n self.assertEqual(ctx.is_image_visible(img), exp_res)", "def test_aws_service_api_private_image_get(self):\n pass", "def test_aws_service_api_private_images_get(self):\n pass", "def test_admin_public(self):\n with self.login(self.user_admin):\n self.assertTrue(current_user.is_authenticated)\n self.assertEqual(current_user, self.user_admin)\n\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def test_aws_service_api_public_image_get(self):\n pass", "def test_auth_public(self):\n self.do_visible(True, None, True, tenant='froggy')", "def test_aws_service_api_public_images_get(self):\n pass", "def test_api_thumbnail_retrieve_by_organization_administrator(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=ADMINISTRATOR,\n )\n\n self.assert_user_can_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_auth_private(self):\n self.do_visible(True, None, False, tenant='froggy')", "def test_public_user(self):\n set_permission(Permission.SHARE, self.user1, self.collection)\n\n data = {\"public\": \"view\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"none\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"edit\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"share\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"owner\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_get_owner_image(self):\n\n # user1 is owner of image_id 1\n # user2 is owner of image ids (2,3)\n for image_id in range(1, 4):\n url = reverse(self.url_name_one, args=(image_id,))\n if image_id == 1:\n self.client.force_authenticate(self.user1)\n else:\n self.client.force_authenticate(self.user2)\n\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n if image_id == 1:\n self.assertEqual(response.data[\"owner\"], \"user1\")\n else:\n self.assertEqual(response.data[\"owner\"], \"user2\")\n\n # user2 try to get image_id 1 which is owner user1\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_no_images_unauthorized(self):\n res = self.client.get(IMAGE_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def media_image_remotely_accessible(self) -> bool:\n return True", "def test_logged_in_owner(self):\n self.make_logged_in_owner()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u, 
album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u, album.display_photo, ALBUM_PRIVATE)", "def test_auth_private_owned(self):\n self.do_visible(True, 'pattieblack', False, tenant='pattieblack')", "def test_image_display(self):\n\n result = self.client.get(\"/select_image\")\n\n self.assertIn(b\"/static/uploads/girl-glowing-skin-blue-eyes.jpg\", result.data)", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_permissions(self):\n taxonomy = self.get_taxonomy()\n return True if self.request.user == taxonomy.author else taxonomy.public", "def test_filter_public_permissions(self):\n data = {\"public\": \"view\"}\n check_public_permissions(data)\n\n data = {\"public\": \"edit\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"share\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"owner\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)", "def test_public_status_page_add_authorization_to_public_status_page(self):\n pass", "def test_api_thumbnail_administrator_read_detail_in_read_only(self):\n thumbnail = ThumbnailFactory()\n\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=thumbnail.video.playlist,\n permissions__can_update=False,\n )\n\n response = self.client.get(\n self._get_url(thumbnail.video, thumbnail),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)", "def do_sharable(self, exp_res, img_owner, membership=None, **kwargs):\n\n img = FakeImage(img_owner, True)\n ctx = context.RequestContext(**kwargs)\n\n sharable_args = {}\n if membership is not None:\n sharable_args['membership'] = membership\n\n self.assertEqual(ctx.is_image_sharable(img, **sharable_args), exp_res)", "def test_auth_required(self, api_client):\n res = api_client.get(PHOTO_URL)\n\n assert res.status_code == status.HTTP_401_UNAUTHORIZED", "def test_image_no_login(self):\n\n result = self.client.get(\"/select_image\", follow_redirects=True)\n\n self.assertIn(b\"Password\", result.data)", "def test_anonymous_public(self):\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def test_api_thumbnail_retrieve_by_organization_student(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=STUDENT,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def testImageHandling(self):\n \n pm = getToolByName(self.portal, 'portal_membership')\n #make sure the person's member portrait 
isn't defined\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n # Delete the (nonexistant) image, make sure the portrait stays undefined\n self.person.setImage('DELETE_IMAGE')\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n self.person.setImage(TEST_GIF, content_type=\"image/gif\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_GIF)\n # Try to get a 10x10 version of the image\n imageOfSizeTag = self.person.getImageOfSize(10, 10)\n self.failUnlessEqual(imageOfSizeTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"10\" width=\"10\" />')\n self.failUnlessEqual(pm.getPersonalPortrait('abc123').__name__, 'abc123')\n \n # Try to get a scaled-by-ratio image with a width of 100.\n scaledImageTag = self.person.getScaledImageByWidth(100)\n self.failUnlessEqual(scaledImageTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"150\" width=\"100\" />')\n \n # Delete the image, make sure the portrait is deleted as well\n self.person.setImage('DELETE_IMAGE')\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n #self.person.setImage(TEST_JPEG, content_type=\"image/jpeg\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_JPEG)\n \n self.person.setImage(TEST_TIFF, content_type=\"image/tiff\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_TIFF)\n # Try to get a 10x10 version of the image\n imageOfSizeTag = self.person.getImageOfSize(10, 10)\n self.failUnlessEqual(imageOfSizeTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"10\" width=\"10\" />')\n \n # Try to get a scaled-by-ratio image with a width of 100.\n # TIFF handling in Plone is broken (probably the fault of PIL), handle the problem nicely.\n scaledImageTag = self.person.getScaledImageByWidth(100)\n self.failUnless(scaledImageTag == '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"150\" width=\"100\" />' or scaledImageTag == '')", "def glance_update_and_set_public(glance, image, image_info):\n image_properties = image_info['image_properties']\n try:\n logger.debug(\"glance image update: properties=%s\", image_properties)\n glance.images.update(image.id, **image_properties)\n logger.debug(\"glance image update: visibility=public\")\n glance.images.update(image.id, visibility='public')\n except Exception:\n logger.exception(\"Updating (-> public) Glance image '%s' [%s] failed\", image.name, image.id)\n return 1\n\n return 0", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def test_api_thumbnail_retrieve_by_playlist_admin(self):\n playlist_access = PlaylistAccessFactory(\n playlist=self.some_video.playlist,\n role=ADMINISTRATOR,\n )\n\n self.assert_user_can_retrieve_thumbnail(\n playlist_access.user, self.some_thumbnail\n )", "def test_admin_accessible(self) -> None:\n response = self.client.get(\"/admin/\")\n self.assertEqual(200, response.status_code)", "def test_public(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_public\": True, \"get\": content},\n status_code=200,\n content=content,\n 
headers_exact={\"Cache-Control\": \"public\"})", "def testimage_handler(self):\n\t\t\n\t\tthings = Thing.objects.all()\n\t\tif len( things ):\n\t\t\tthing = things[0]\n\t\telse:\n\t\t\tc = Client()\n\t\t\tdata = parse_qs( 'title=&tags=&lattitude=32.82248&longitude=-96.762986&duration=&parent=&privacy=U&lifespan=&format=txt' )\n\t\t\tdata[ 'media' ] = open( MEDIA_ROOT + 'unittest_image.jpg' )\n\t\t\tc.post( '/api/place/', data )\n\t\t\t\n\t\t\tthing = Thing.objects.all()[0]\n\n\t\t\n\t\turi = thing.media.replace( 'http://' + DOMAIN, '' )\n\t\t\n\t\tc = Client()\n\t\tresponse = c.get( uri )\n\t\tself.failUnlessEqual(response.status_code, 200)", "def test_photos_limited_to_user(self, api_client, test_user):\n\n user2 = get_user_model().objects.create_user(\n 'otheruser@company.com',\n 'password123'\n )\n sample_photo(user=user2)\n sample_photo(user=test_user)\n\n api_client.force_authenticate(test_user)\n res = api_client.get(PHOTO_URL)\n\n photos = Photo.objects.filter(user=test_user)\n serializer = PhotoSerializer(photos, many=True)\n\n assert res.status_code == status.HTTP_200_OK\n assert len(res.data) == 1\n assert res.data == serializer.data", "def test_has_role_public(self):\n self.project.set_public()\n self.assertFalse(self.category.has_role(self.user_bob))\n self.assertTrue(self.project.has_role(self.user_bob))", "def test_logged_in_contributor(self):\n self.make_logged_in_contributor()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PRIVATE)", "def test_access_is_password_protected(self):\n\n # Directly create an Upload object in the DB.\n upload = create_url_upload(\"https://google.com/robots.txt\")\n response = self.client.get(reverse(\"upload-access\", args=[upload.id]))\n\n # Ensure request does not error, no special code expected here.\n self.assertEqual(response.status_code, 200)\n\n self.assertContains(response, \"Password\")", "def test_cant_create_image_unauthorized(self):\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (10, 10))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n res = self.client.post(IMAGE_URL, data={'image': ntf},\n format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_get_photos(self):\n recipe = Recipes.objects.create(chef=self.user, draft=False, private=False)\n photo = Photos.objects.create(recipe=recipe, photo_order=1)\n\n url = '/0/chefs/%i/photos' % self.user.pk\n\n resp = self.client.get(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('photos', resp.data)\n self.assertEqual(1, len(resp.data['photos']))\n keys = ('edit_date', 'creation_date', 'id', u'temperature', 'url', 'recipe', 'cover',\n 'time', 'instructions', 'order', 'quantity')\n self.assertEqual(set(keys), set(resp.data['photos'][0].keys()))", "def test_api_thumbnail_read_detail_admin_user(self):\n video = VideoFactory(\n uploaded_on=datetime(2018, 8, 8, tzinfo=timezone.utc), upload_state=\"ready\"\n )\n thumbnail = 
ThumbnailFactory(video=video, upload_state=\"pending\")\n\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=video.playlist,\n roles=[\"administrator\"],\n )\n\n response = self.client.get(\n self._get_url(video, thumbnail),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content)\n\n self.assertEqual(\n content,\n {\n \"id\": str(thumbnail.id),\n \"active_stamp\": None,\n \"is_ready_to_show\": False,\n \"upload_state\": \"pending\",\n \"urls\": None,\n \"video\": str(video.id),\n },\n )", "def test_api_thumbnail_instructor_read_detail_in_read_only(self):\n thumbnail = ThumbnailFactory()\n\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=thumbnail.video.playlist,\n permissions__can_update=False,\n )\n\n response = self.client.get(\n self._get_url(thumbnail.video, thumbnail),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)", "def test_public(client, url):\n response = client.get(url, secure=True)\n assert response.status_code == 200", "def guest_access(func):\n def decorated(*_, **kwargs):\n public_profiles = current_app.config['USER_PUBLIC_PROFILES']\n if not public_profiles:\n if not current_user.is_authenticated:\n abort(401)\n elif current_user.id != kwargs['id']:\n abort(403)\n return func(**kwargs)\n\n return decorated", "def test_api_thumbnail_retrieve_by_organization_instructor(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=INSTRUCTOR,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_photo_classification_view_set_get_public(self):\n # Test data\n user = account_models.User.objects.get(email='mrtest@mypapaya.io')\n\n photo_models.PhotoClassification.objects.create_or_update(name='City', public=False)\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n request = client.get('/api/photo_classifications')\n results = request.data['results']\n\n self.assertEquals(len(results), 13)", "def test_aws_service_api_image_get(self):\n pass", "def test_checkread(self):\n user1 = {'uid': 1, 'gid': 1}\n self.assertTrue(self.m._checkread(user1, {}))\n mock_image = {\n 'userACL': None,\n 'groupACL': None\n }\n # Test a public image with ACLs set to None\n self.assertTrue(self.m._checkread(user1, mock_image))\n # Now empty list instead of None. 
Treat it the same way.\n mock_image['userACL'] = []\n mock_image['groupACL'] = []\n self.assertTrue(self.m._checkread(user1, mock_image))\n self.assertTrue(self.m._checkread(user1, {'private': False}))\n # Private false should trump other things\n self.assertTrue(self.m._checkread(user1,\n {'private': False, 'userACL': [2]}))\n self.assertTrue(self.m._checkread(user1,\n {'private': False, 'groupACL': [2]}))\n # Now check a protected image that the user should\n # have access to\n mock_image['userACL'] = [1]\n self.assertTrue(self.m._checkread(user1, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 2, 'gid': 1}, mock_image))\n # Now check by groupACL\n mock_image['groupACL'] = [1]\n self.assertTrue(self.m._checkread({'uid': 3, 'gid': 1}, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 3, 'gid': 2}, mock_image))\n # What about an image with a list\n mock_image = {\n 'userACL': [1, 2, 3],\n 'groupACL': [4, 5, 6]\n }\n self.assertTrue(self.m._checkread(user1, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 7, 'gid': 7}, mock_image))", "def test_private(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_public\": False, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"private\"})", "def test_logged_user_can_access(self):\n\n utils.create_user_and_authenticate(self)\n utils.test_can_access(self, self.url)", "def test_get_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_user_visibility(app, resource):\n nethz = 'Something'\n with app.user(nethz=nethz):\n # Create fake signup with different nethz\n own = str(app.data.driver.db[resource].insert({'nethz': nethz}))\n other = str(app.data.driver.db[resource].insert({'nethz': 'trolo'}))\n\n # Resource: Can only see own, not both signups\n response = app.client.get('/' + resource, assert_status=200)\n assert len(response['_items']) == 1\n assert response['_items'][0]['nethz'] == nethz\n\n # Items\n own_url = '/%s/%s' % (resource, own)\n other_url = '/%s/%s' % (resource, other)\n\n # Get\n app.client.get(own_url, assert_status=200)\n app.client.get(other_url, assert_status=404)\n\n # Patch (if we can see item, we get 428 since etag is missing)\n app.client.patch(own_url, data={}, assert_status=428)\n app.client.patch(other_url, data={}, assert_status=404)\n\n # Delete (etag missing again)\n app.client.delete(own_url, assert_status=428)\n app.client.delete(other_url, assert_status=404)", "def test_api_thumbnail_retrieve_by_consumer_site_any_role(self):\n consumer_site_access = ConsumerSiteAccessFactory(\n consumer_site=self.some_video.playlist.consumer_site,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n consumer_site_access.user, self.some_thumbnail\n )", "def test_empty_public_owned(self):\n self.do_visible(True, 'pattieblack', True, is_admin=True)", "def test_anything_else_is_accessible(api_client):\n\n assert api_client().get(\"/anything/else\").status_code == 200", "def test_document_not_publicly_accessible(self):\n document = DocumentFactory(\n id=\"301b5f4f-b9f1-4a5f-897d-f8f1bf22c396\",\n playlist__title=\"playlist-003\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n is_public=False,\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n 
uploaded_on=\"2019-09-24 07:24:40+00\",\n )\n\n response = self.client.get(f\"/documents/{document.pk}\")\n\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"<html>\")\n content = response.content.decode(\"utf-8\")\n\n match = re.search(\n '<div id=\"marsha-frontend-data\" data-context=\"(.*)\">', content\n )\n\n context = json.loads(unescape(match.group(1)))\n\n self.assertIsNone(context.get(\"resource\"))\n self.assertEqual(context.get(\"state\"), \"error\")\n self.assertEqual(context.get(\"modelName\"), \"documents\")", "def test_detail_is_hacker_permission(self):\n self.user_1.username = 'pythonhacker'\n self.user_1.save()\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def test_api_thumbnail_retrieve_by_random_user(self):\n user = UserFactory()\n\n self.assert_user_cannot_retrieve_thumbnail(user, self.some_thumbnail)", "def test_pull_public_acl(self):\n # Use defaults for format, arch, os, ostcount, replication\n pr = {\n 'system': self.system,\n 'itype': self.itype,\n 'tag': self.tag,\n 'remotetype': 'dockerv2',\n 'userACL': [1001, 1002],\n 'groupACL': [1003, 1004]\n }\n # Do the pull\n session = self.m.new_session(self.auth, self.system)\n rec = self.m.pull(session, pr) # ,delay=False)\n id = rec['_id']\n self.assertIsNotNone(rec)\n # Confirm record\n q = {'system': self.system, 'itype': self.itype,\n 'pulltag': self.tag}\n state = self.time_wait(id)\n mrec = self.images.find_one(q)\n self.assertIn('_id', mrec)\n self.assertIn('userACL', mrec)\n self.assertIn('ENV', mrec)\n # Track through transistions\n state = self.time_wait(id)\n self.assertEqual(state, 'READY')\n mrec = self.images.find_one(q)\n self.assertIn('ENV', mrec)\n self.assertIn('private', mrec)\n self.assertFalse(mrec['private'])", "def test_authorization(self):\n res = self.get(url=\"/products/1/pricehistory\")\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnauthorizedAccess)\n res = self.get(url=\"/products/1/pricehistory\", role=\"user\")\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnauthorizedAccess)", "def test_document_publicly_accessible(self):\n document = DocumentFactory(\n id=\"301b5f4f-b9f1-4a5f-897d-f8f1bf22c396\",\n playlist__title=\"playlist-003\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n is_public=True,\n title=\"document-001\",\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n uploaded_on=\"2019-09-24 07:24:40+00\",\n )\n\n response = self.client.get(f\"/documents/{document.pk}\")\n\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"<html>\")\n content = response.content.decode(\"utf-8\")\n\n match = re.search(\n '<div id=\"marsha-frontend-data\" data-context=\"(.*)\">', content\n )\n\n context = json.loads(unescape(match.group(1)))\n jwt_token = AccessToken(context.get(\"jwt\"))\n\n self.assertEqual(\n jwt_token.payload[\"permissions\"],\n {\"can_access_dashboard\": False, \"can_update\": False},\n )\n self.assertEqual(context.get(\"state\"), \"success\")\n self.assertEqual(\n context.get(\"resource\"),\n {\n \"active_stamp\": \"1569309880\",\n \"is_ready_to_show\": True,\n \"show_download\": True,\n \"id\": str(document.id),\n 
\"upload_state\": document.upload_state,\n \"title\": document.title,\n \"extension\": None,\n \"filename\": \"playlist-003_document-001\",\n \"playlist\": {\n \"id\": str(document.playlist.id),\n \"title\": \"playlist-003\",\n \"lti_id\": \"course-v1:ufr+mathematics+00001\",\n },\n \"url\": (\n \"https://abc.cloudfront.net/301b5f4f-b9f1-4a5f-897d-f8f1bf22c396\"\n \"/document/1569309880?response-content-disposition=attachment%3B\"\n \"+filename%3Dplaylist-003_document-001\"\n ),\n },\n )\n self.assertEqual(context.get(\"modelName\"), \"documents\")\n self.assertIsNone(context.get(\"context_id\"))", "def test_public_status_page_remove_authorization_from_public_status_page(self):\n pass", "def test_logged_in_not_friend(self):\n\n # log in\n self.make_logged_in_not_friend()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PUBLIC)", "def test_edit_image_instance(self):\n self.client.force_authenticate(self.user1)\n data = {\n \"img_name\": \"photo_user1\",\n \"img_description\": \"photo of user1\",\n \"favourite\": True,\n \"width\": 700,\n \"height\": 500,\n \"share_user\": [],\n }\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.put(url, data, format=\"multipart\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # Get edited object, convert to dict and compare with inputs\n obj = model_to_dict(Images.objects.get(id=1))\n for field, edited_data in data.items():\n self.assertEqual(edited_data, obj[field])\n # Check if image was edited to a new input\n edited_img = Image.open(self.test_pic_folder + \"/test.png\")\n self.assertEqual(edited_img.size, (700, 500))", "def test_has_object_read_permission_private(\n mock_parent_permission, api_rf, profile_factory\n):\n profile = profile_factory(is_private=True)\n request = api_rf.get(\"/\")\n\n expected = mock_parent_permission.return_value\n\n assert profile.has_object_read_permission(request) == expected\n assert mock_parent_permission.call_count == 1\n assert mock_parent_permission.call_args[0] == (request,)", "def test_admin_api_organization_accesses_request_authenticated(self):\n user = factories.UserFactory(is_staff=False, is_superuser=False)\n self.client.login(username=user.username, password=\"password\")\n organization = factories.OrganizationFactory()\n response = self.client.get(\n f\"/api/v1.0/admin/organizations/{organization.id}/accesses/\"\n )\n\n self.assertContains(\n response,\n \"You do not have permission to perform this action.\",\n status_code=403,\n )", "def test_anon_public_owned(self):\n self.do_visible(True, 'pattieblack', True)", "def test_empty_public(self):\n self.do_visible(True, None, True, is_admin=True)", "def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_should_render_with_authenticated(self) -> 
None:\n with override_feature_check(unified_banner_feature.feature_id, False):\n self.request.user = User.objects.get(username='doc')\n self.assertTrue(\n self.action.should_render(\n context=self._create_request_context(\n User.objects.get(username='doc'))))", "def test_un_logged_in_user_can_not_upload_picture(self):\n tmp_file = generate_image_for_testing()\n response = self.client.post(self.user_passport_url,\n data={'passport': tmp_file})\n\n self.assertEqual(403, response.status_code)", "def is_authorized(self, request, obj=None):\r\n if request.method == 'GET':\r\n return True\r\n else:\r\n return False", "def test_retrieval_of_user_photos(self):\t\n\t\tget_response = self.client.get(reverse('photos'))\n\n\t\tself.assertEqual(get_response.status_code, status.HTTP_200_OK)\n\t\tdata = [i.values() for i in get_response.data]\n\t\tself.assertIn(u'{}'.format(self.image_name), data[0])", "def test_url_is_accessible_to_externals(self):\n\n res = self.client.get(self.url)\n self.assertEqual(res.status_code, 200)\n\n self.user.is_external = False\n self.user.save()\n res = self.client.get(self.url)\n self.assertEqual(res.status_code, 404)", "def get_public_images_for_user(username):\n\n user = get_user_by_username(username)\n images = user.images\n public_images = [img for img in images if img.permission.value == \"PUBLIC\"]\n return public_images", "def test_admin_view_access(request_ctx):\n user = User.get(email=\"root@test0.edu\")\n with request_ctx(\"/org_invitatin_summary\") as ctx:\n login_user(user, remember=True)\n rv = ctx.app.full_dispatch_request()\n assert rv.status_code == 200\n assert b\"<!DOCTYPE html>\" in rv.data, \"Expected HTML content\"\n assert b\"Organisation Invitation Summary\" in rv.data\n assert b\"root@test0.edu\" in rv.data", "def test_api_thumbnail_read_detail_anonymous(self):\n video = VideoFactory()\n thumbnail = ThumbnailFactory(video=video)\n response = self.client.get(self._get_url(video, thumbnail))\n self.assertEqual(response.status_code, 401)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"Authentication credentials were not provided.\"}\n )", "def test_image_import(self):\r\n module_store = modulestore('direct')\r\n\r\n content_store = contentstore()\r\n\r\n # Use conditional_and_poll, as it's got an image already\r\n import_from_xml(\r\n module_store,\r\n 'common/test/data/',\r\n ['conditional_and_poll'],\r\n static_content_store=content_store\r\n )\r\n\r\n course = module_store.get_courses()[0]\r\n\r\n # Make sure the course image is set to the right place\r\n self.assertEqual(course.course_image, 'images_course_image.jpg')\r\n\r\n # Ensure that the imported course image is present -- this shouldn't raise an exception\r\n asset_key = course.id.make_asset_key('asset', course.course_image)\r\n content_store.find(asset_key)", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.global_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in templates))", "def test_user_avatar_serving(self):\n User = get_user_model()\n test_user = User.objects.create_user('Bob', 'bob@bob.com', 'pass123',\n set_default_avatar=True)\n\n avatar_url = reverse('misago:user-avatar', kwargs={\n 'pk': test_user.pk,\n 'hash': test_user.avatar_hash,\n 'size': 150,\n })\n response = 
self.client.get(avatar_url)\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], 'image/png')", "def is_authorized(self, request, obj=None):\r\n return True", "def allowed_to_preview(user):\n if (\n user.is_authenticated and\n user.is_active and\n user.is_staff\n ):\n return True\n return False", "def test_list_image(self):\n pass", "def test_home_as_user(self):\n self.client.login(username=\"test_user_1\", password=\"test\")\n response = self.client.get(\"/images/contents/\")\n self.assertJson(\n response,\n {\n \"files\": [],\n \"folders\": [],\n \"name\": \"\",\n \"path\": \"/\",\n },\n )", "def test_get_public_guest_access(self):\n self.project.public_guest_access = True\n self.project.save()\n user_new = self.make_user('user_new')\n url = reverse('projectroles:api_project_list')\n response = self.request_knox(url, token=self.get_token(user_new))\n self.assertEqual(response.status_code, 200)\n response_data = json.loads(response.content)\n self.assertEqual(len(response_data), 1)\n self.assertEqual(\n response_data[0]['sodar_uuid'], str(self.project.sodar_uuid)\n )", "async def test___init__(image_config: ImageConfig, image_config_signed: ImageConfig):\n assert image_config\n assert image_config_signed", "def test_get_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )", "def test_allowed_if_superuser(self):\n\n @task_or_superuser_only\n def view(request):\n return HttpResponse(\"Hello\")\n\n class User(object):\n is_superuser = True\n is_authenticated = True\n\n request = self.factory.get(\"/\")\n request.user = None\n response = view(request)\n self.assertEqual(response.status_code, 403)\n\n request.user = User()\n response = view(request)\n self.assertEqual(response.status_code, 200)", "def test_should_render_with_user_in_read_only(self) -> None:\n self.request.user = User.objects.get(username='doc')\n\n # Turning on read-only mode prevents creation of some objects so call\n # _create_request_context first.\n request_context = self._create_request_context(user=self.request.user)\n\n settings = {\n 'site_read_only': True,\n }\n\n with override_feature_check(unified_banner_feature.feature_id, False):\n with self.siteconfig_settings(settings):\n if getattr(self, 'read_only_always_show', False):\n self.assertTrue(\n self.action.should_render(context=request_context))\n else:\n self.assertFalse(\n self.action.should_render(context=request_context))", "def test_get_reusableitem_api_public(self):\n\n self.reusableitem_1.is_public = True\n self.reusableitem_1.save()\n\n self.client.logout()\n\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_client_photo_view(self):\r\n response = self.test_client.get(reverse('client_photo'))\r\n self.assertEqual(response.status_code, 200)\r\n self.assertTrue(self.client1.get_client_photo_data()\r\n in response.context['clients'])", "def test_auth_required(self, api_client):\n res = api_client.get(PHOTO_ALBUM_URL)\n\n assert res.status_code == status.HTTP_401_UNAUTHORIZED", "def ft_syndicate_access():\n \n fake_user = FakeObject()\n fake_user.email = \"fakeuser@opencloud.us\"\n\n print \"\\nensure_user_exists(%s)\\n\" % fake_user.email\n ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )\n\n 
print \"\\nensure_user_exists(%s)\\n\" % fake_user.email\n ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )\n\n fake_volume = FakeObject()\n fake_volume.name = \"fakevolume\"\n fake_volume.description = \"This is a fake volume, created for funtional testing\"\n fake_volume.blocksize = 1024\n fake_volume.cap_read_data = True \n fake_volume.cap_write_data = True \n fake_volume.cap_host_data = False\n fake_volume.archive = False\n fake_volume.private = True\n \n # test idempotency\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n \n print \"\\nensure_volume_access_right_exists(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )\n \n print \"\\nensure_volume_access_right_exists(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )\n \n print \"\\nensure_volume_access_right_absent(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_absent( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_access_right_absent(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_absent( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_user_absent(%s)\\n\" % fake_user.email\n ensure_user_absent( fake_user.email )\n\n print \"\\nensure_user_absent(%s)\\n\" % fake_user.email\n ensure_user_absent( fake_user.email )\n \n \n \n \n print \"\\nensure_principal_exists(%s)\\n\" % fake_user.email\n ensure_principal_exists( fake_user.email, \"asdf\", is_admin=False, max_UGs=1100, max_RGs=1 )\n \n print \"\\nensure_principal_exists(%s)\\n\" % fake_user.email\n ensure_principal_exists( fake_user.email, \"asdf\", is_admin=False, max_UGs=1100, max_RGs=1 )\n\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n\n print \"\\nsetup_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name)\n setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, \"abcdef\" )\n \n print \"\\nsetup_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name)\n setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, \"abcdef\" )\n \n print \"\\nteardown_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name )\n teardown_volume_access( fake_user.email, fake_volume.name )\n \n print \"\\nteardown_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name )\n teardown_volume_access( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_principal_absent(%s)\\n\" % fake_user.email\n ensure_principal_absent( fake_user.email )", "def test_empty_private(self):\n self.do_visible(True, None, False, is_admin=True)", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, 
request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_page_view_permission(self):\n \n adminonlypage = create_page_in_admin(self.testproject,\"adminonlypage\",\n permission_lvl=Page.ADMIN_ONLY) \n registeredonlypage = create_page_in_admin(self.testproject,\"registeredonlypage\",\n permission_lvl=Page.REGISTERED_ONLY)\n publicpage = create_page_in_admin(self.testproject,\"publicpage\",\n permission_lvl=Page.ALL)\n \n self._test_page_can_be_viewed(self.projectadmin,adminonlypage)\n self._test_page_can_not_be_viewed(self.participant,adminonlypage)\n self._test_page_can_not_be_viewed(self.registered_user,adminonlypage) \n self._test_page_can_not_be_viewed(None,adminonlypage) # None = not logged in\n \n self._test_page_can_be_viewed(self.projectadmin,registeredonlypage)\n self._test_page_can_be_viewed(self.participant,registeredonlypage)\n self._test_page_can_not_be_viewed(self.registered_user,registeredonlypage)\n self._test_page_can_not_be_viewed(None,registeredonlypage) # None = not logged in\n \n self._test_page_can_be_viewed(self.projectadmin,publicpage)\n self._test_page_can_be_viewed(self.participant,publicpage)\n self._test_page_can_be_viewed(self.registered_user,publicpage)\n self._test_page_can_be_viewed(None,publicpage) # None = not logged in", "def test_default_publish(self):\n self.assertIs(self.photo.published, 'Public')" ]
[ "0.73359025", "0.69640315", "0.6943459", "0.6736147", "0.6731248", "0.65971106", "0.65788954", "0.6512353", "0.6501864", "0.6422808", "0.63721627", "0.63721627", "0.6363177", "0.6315687", "0.6243033", "0.6159482", "0.6147465", "0.6144617", "0.6143973", "0.6141446", "0.61072034", "0.6104067", "0.60402876", "0.6035576", "0.60076946", "0.59484875", "0.5921155", "0.5887372", "0.5858703", "0.584998", "0.58407396", "0.5826784", "0.582361", "0.58222574", "0.5810045", "0.57799876", "0.57697916", "0.57592404", "0.5750172", "0.5742414", "0.5736039", "0.5720758", "0.5719569", "0.57189256", "0.5717449", "0.57170105", "0.57141495", "0.57126474", "0.56883764", "0.5671473", "0.56699306", "0.5665516", "0.5654681", "0.5650051", "0.5648757", "0.56454074", "0.5624714", "0.56202114", "0.55991274", "0.5597044", "0.5575395", "0.5575146", "0.5570336", "0.5559437", "0.55589414", "0.5555299", "0.5552616", "0.5546868", "0.55466336", "0.5534909", "0.5530163", "0.55283475", "0.5526529", "0.55189687", "0.5498024", "0.54938066", "0.54859185", "0.5476082", "0.54712856", "0.5470314", "0.5452643", "0.54491866", "0.5444074", "0.5442838", "0.54397595", "0.54393923", "0.5434921", "0.5434421", "0.54235023", "0.54181236", "0.54125017", "0.5409896", "0.53994375", "0.53986406", "0.53946364", "0.53942114", "0.53846127", "0.53837866", "0.538129", "0.537931" ]
0.64223385
10
Tests that an authenticated context (with is_admin set to False) can access an image with is_public set to False.
def test_auth_private(self): self.do_visible(True, None, False, tenant='froggy')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_visible(self, exp_res, img_owner, img_public, **kwargs):\n\n img = FakeImage(img_owner, img_public)\n ctx = context.RequestContext(**kwargs)\n\n self.assertEqual(ctx.is_image_visible(img), exp_res)", "def test_aws_service_api_private_image_get(self):\n pass", "def test_aws_service_api_private_images_get(self):\n pass", "def test_admin_public(self):\n with self.login(self.user_admin):\n self.assertTrue(current_user.is_authenticated)\n self.assertEqual(current_user, self.user_admin)\n\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def test_aws_service_api_public_image_get(self):\n pass", "def test_auth_public(self):\n self.do_visible(True, None, True, tenant='froggy')", "def test_api_thumbnail_retrieve_by_organization_administrator(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=ADMINISTRATOR,\n )\n\n self.assert_user_can_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_aws_service_api_public_images_get(self):\n pass", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_no_images_unauthorized(self):\n res = self.client.get(IMAGE_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_auth_public_owned(self):\n self.do_visible(True, 'pattieblack', True, tenant='pattieblack')", "def test_public_user(self):\n set_permission(Permission.SHARE, self.user1, self.collection)\n\n data = {\"public\": \"view\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"none\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"edit\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"share\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"owner\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def test_image_no_login(self):\n\n result = self.client.get(\"/select_image\", follow_redirects=True)\n\n self.assertIn(b\"Password\", result.data)", "def media_image_remotely_accessible(self) -> bool:\n return True", "def test_image_display(self):\n\n result = self.client.get(\"/select_image\")\n\n self.assertIn(b\"/static/uploads/girl-glowing-skin-blue-eyes.jpg\", result.data)", "def test_get_owner_image(self):\n\n # user1 is owner of image_id 1\n # user2 is owner of image ids (2,3)\n for image_id in range(1, 4):\n url = reverse(self.url_name_one, args=(image_id,))\n if image_id == 1:\n self.client.force_authenticate(self.user1)\n else:\n self.client.force_authenticate(self.user2)\n\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n if image_id == 1:\n self.assertEqual(response.data[\"owner\"], \"user1\")\n else:\n self.assertEqual(response.data[\"owner\"], \"user2\")\n\n # user2 try to get image_id 1 which is owner user1\n url = reverse(self.url_name_one, args=(1,))\n 
response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_api_thumbnail_administrator_read_detail_in_read_only(self):\n thumbnail = ThumbnailFactory()\n\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=thumbnail.video.playlist,\n permissions__can_update=False,\n )\n\n response = self.client.get(\n self._get_url(thumbnail.video, thumbnail),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_auth_private_owned(self):\n self.do_visible(True, 'pattieblack', False, tenant='pattieblack')", "def test_permissions(self):\n taxonomy = self.get_taxonomy()\n return True if self.request.user == taxonomy.author else taxonomy.public", "def test_auth_required(self, api_client):\n res = api_client.get(PHOTO_URL)\n\n assert res.status_code == status.HTTP_401_UNAUTHORIZED", "def test_filter_public_permissions(self):\n data = {\"public\": \"view\"}\n check_public_permissions(data)\n\n data = {\"public\": \"edit\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"share\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"owner\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)", "def test_logged_in_owner(self):\n self.make_logged_in_owner()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u, album.display_photo, ALBUM_PRIVATE)", "def test_public_status_page_add_authorization_to_public_status_page(self):\n pass", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def test_api_thumbnail_retrieve_by_organization_student(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=STUDENT,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_anonymous_public(self):\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def do_sharable(self, exp_res, img_owner, membership=None, **kwargs):\n\n img = FakeImage(img_owner, True)\n ctx = context.RequestContext(**kwargs)\n\n sharable_args = {}\n if membership is not None:\n sharable_args['membership'] = membership\n\n self.assertEqual(ctx.is_image_sharable(img, **sharable_args), exp_res)", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def test_auth_private_unowned(self):\n 
self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_admin_accessible(self) -> None:\n response = self.client.get(\"/admin/\")\n self.assertEqual(200, response.status_code)", "def test_api_thumbnail_retrieve_by_playlist_admin(self):\n playlist_access = PlaylistAccessFactory(\n playlist=self.some_video.playlist,\n role=ADMINISTRATOR,\n )\n\n self.assert_user_can_retrieve_thumbnail(\n playlist_access.user, self.some_thumbnail\n )", "def testImageHandling(self):\n \n pm = getToolByName(self.portal, 'portal_membership')\n #make sure the person's member portrait isn't defined\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n # Delete the (nonexistant) image, make sure the portrait stays undefined\n self.person.setImage('DELETE_IMAGE')\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n self.person.setImage(TEST_GIF, content_type=\"image/gif\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_GIF)\n # Try to get a 10x10 version of the image\n imageOfSizeTag = self.person.getImageOfSize(10, 10)\n self.failUnlessEqual(imageOfSizeTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"10\" width=\"10\" />')\n self.failUnlessEqual(pm.getPersonalPortrait('abc123').__name__, 'abc123')\n \n # Try to get a scaled-by-ratio image with a width of 100.\n scaledImageTag = self.person.getScaledImageByWidth(100)\n self.failUnlessEqual(scaledImageTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"150\" width=\"100\" />')\n \n # Delete the image, make sure the portrait is deleted as well\n self.person.setImage('DELETE_IMAGE')\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n #self.person.setImage(TEST_JPEG, content_type=\"image/jpeg\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_JPEG)\n \n self.person.setImage(TEST_TIFF, content_type=\"image/tiff\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_TIFF)\n # Try to get a 10x10 version of the image\n imageOfSizeTag = self.person.getImageOfSize(10, 10)\n self.failUnlessEqual(imageOfSizeTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"10\" width=\"10\" />')\n \n # Try to get a scaled-by-ratio image with a width of 100.\n # TIFF handling in Plone is broken (probably the fault of PIL), handle the problem nicely.\n scaledImageTag = self.person.getScaledImageByWidth(100)\n self.failUnless(scaledImageTag == '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"150\" width=\"100\" />' or scaledImageTag == '')", "def test_access_is_password_protected(self):\n\n # Directly create an Upload object in the DB.\n upload = create_url_upload(\"https://google.com/robots.txt\")\n response = self.client.get(reverse(\"upload-access\", args=[upload.id]))\n\n # Ensure request does not error, no special code expected here.\n self.assertEqual(response.status_code, 200)\n\n self.assertContains(response, \"Password\")", "def test_api_thumbnail_instructor_read_detail_in_read_only(self):\n thumbnail = ThumbnailFactory()\n\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=thumbnail.video.playlist,\n permissions__can_update=False,\n )\n\n response = self.client.get(\n self._get_url(thumbnail.video, 
thumbnail),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)", "def test_api_thumbnail_read_detail_admin_user(self):\n video = VideoFactory(\n uploaded_on=datetime(2018, 8, 8, tzinfo=timezone.utc), upload_state=\"ready\"\n )\n thumbnail = ThumbnailFactory(video=video, upload_state=\"pending\")\n\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=video.playlist,\n roles=[\"administrator\"],\n )\n\n response = self.client.get(\n self._get_url(video, thumbnail),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content)\n\n self.assertEqual(\n content,\n {\n \"id\": str(thumbnail.id),\n \"active_stamp\": None,\n \"is_ready_to_show\": False,\n \"upload_state\": \"pending\",\n \"urls\": None,\n \"video\": str(video.id),\n },\n )", "def test_api_thumbnail_retrieve_by_organization_instructor(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=INSTRUCTOR,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def guest_access(func):\n def decorated(*_, **kwargs):\n public_profiles = current_app.config['USER_PUBLIC_PROFILES']\n if not public_profiles:\n if not current_user.is_authenticated:\n abort(401)\n elif current_user.id != kwargs['id']:\n abort(403)\n return func(**kwargs)\n\n return decorated", "def test_cant_create_image_unauthorized(self):\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (10, 10))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n res = self.client.post(IMAGE_URL, data={'image': ntf},\n format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_private(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_public\": False, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"private\"})", "def test_public(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_public\": True, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"public\"})", "def test_photos_limited_to_user(self, api_client, test_user):\n\n user2 = get_user_model().objects.create_user(\n 'otheruser@company.com',\n 'password123'\n )\n sample_photo(user=user2)\n sample_photo(user=test_user)\n\n api_client.force_authenticate(test_user)\n res = api_client.get(PHOTO_URL)\n\n photos = Photo.objects.filter(user=test_user)\n serializer = PhotoSerializer(photos, many=True)\n\n assert res.status_code == status.HTTP_200_OK\n assert len(res.data) == 1\n assert res.data == serializer.data", "def test_public(client, url):\n response = client.get(url, secure=True)\n assert response.status_code == 200", "def test_document_not_publicly_accessible(self):\n document = DocumentFactory(\n id=\"301b5f4f-b9f1-4a5f-897d-f8f1bf22c396\",\n playlist__title=\"playlist-003\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n is_public=False,\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n uploaded_on=\"2019-09-24 07:24:40+00\",\n )\n\n response = self.client.get(f\"/documents/{document.pk}\")\n\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"<html>\")\n content = response.content.decode(\"utf-8\")\n\n match = re.search(\n '<div id=\"marsha-frontend-data\" data-context=\"(.*)\">', content\n )\n\n context = json.loads(unescape(match.group(1)))\n\n 
self.assertIsNone(context.get(\"resource\"))\n self.assertEqual(context.get(\"state\"), \"error\")\n self.assertEqual(context.get(\"modelName\"), \"documents\")", "def test_logged_in_contributor(self):\n self.make_logged_in_contributor()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PRIVATE)", "def test_get_photos(self):\n recipe = Recipes.objects.create(chef=self.user, draft=False, private=False)\n photo = Photos.objects.create(recipe=recipe, photo_order=1)\n\n url = '/0/chefs/%i/photos' % self.user.pk\n\n resp = self.client.get(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('photos', resp.data)\n self.assertEqual(1, len(resp.data['photos']))\n keys = ('edit_date', 'creation_date', 'id', u'temperature', 'url', 'recipe', 'cover',\n 'time', 'instructions', 'order', 'quantity')\n self.assertEqual(set(keys), set(resp.data['photos'][0].keys()))", "def test_checkread(self):\n user1 = {'uid': 1, 'gid': 1}\n self.assertTrue(self.m._checkread(user1, {}))\n mock_image = {\n 'userACL': None,\n 'groupACL': None\n }\n # Test a public image with ACLs set to None\n self.assertTrue(self.m._checkread(user1, mock_image))\n # Now empty list instead of None. Treat it the same way.\n mock_image['userACL'] = []\n mock_image['groupACL'] = []\n self.assertTrue(self.m._checkread(user1, mock_image))\n self.assertTrue(self.m._checkread(user1, {'private': False}))\n # Private false should trump other things\n self.assertTrue(self.m._checkread(user1,\n {'private': False, 'userACL': [2]}))\n self.assertTrue(self.m._checkread(user1,\n {'private': False, 'groupACL': [2]}))\n # Now check a protected image that the user should\n # have access to\n mock_image['userACL'] = [1]\n self.assertTrue(self.m._checkread(user1, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 2, 'gid': 1}, mock_image))\n # Now check by groupACL\n mock_image['groupACL'] = [1]\n self.assertTrue(self.m._checkread({'uid': 3, 'gid': 1}, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 3, 'gid': 2}, mock_image))\n # What about an image with a list\n mock_image = {\n 'userACL': [1, 2, 3],\n 'groupACL': [4, 5, 6]\n }\n self.assertTrue(self.m._checkread(user1, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 7, 'gid': 7}, mock_image))", "def test_public_status_page_remove_authorization_from_public_status_page(self):\n pass", "def test_get_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_photo_classification_view_set_get_public(self):\n # Test data\n user = account_models.User.objects.get(email='mrtest@mypapaya.io')\n\n photo_models.PhotoClassification.objects.create_or_update(name='City', public=False)\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get 
data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n request = client.get('/api/photo_classifications')\n results = request.data['results']\n\n self.assertEquals(len(results), 13)", "def test_authorization(self):\n res = self.get(url=\"/products/1/pricehistory\")\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnauthorizedAccess)\n res = self.get(url=\"/products/1/pricehistory\", role=\"user\")\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnauthorizedAccess)", "def test_un_logged_in_user_can_not_upload_picture(self):\n tmp_file = generate_image_for_testing()\n response = self.client.post(self.user_passport_url,\n data={'passport': tmp_file})\n\n self.assertEqual(403, response.status_code)", "def test_api_thumbnail_retrieve_by_random_user(self):\n user = UserFactory()\n\n self.assert_user_cannot_retrieve_thumbnail(user, self.some_thumbnail)", "def test_aws_service_api_image_get(self):\n pass", "def test_logged_user_can_access(self):\n\n utils.create_user_and_authenticate(self)\n utils.test_can_access(self, self.url)", "def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_should_render_with_authenticated(self) -> None:\n with override_feature_check(unified_banner_feature.feature_id, False):\n self.request.user = User.objects.get(username='doc')\n self.assertTrue(\n self.action.should_render(\n context=self._create_request_context(\n User.objects.get(username='doc'))))", "def test_has_role_public(self):\n self.project.set_public()\n self.assertFalse(self.category.has_role(self.user_bob))\n self.assertTrue(self.project.has_role(self.user_bob))", "def test_anything_else_is_accessible(api_client):\n\n assert api_client().get(\"/anything/else\").status_code == 200", "def glance_update_and_set_public(glance, image, image_info):\n image_properties = image_info['image_properties']\n try:\n logger.debug(\"glance image update: properties=%s\", image_properties)\n glance.images.update(image.id, **image_properties)\n logger.debug(\"glance image update: visibility=public\")\n glance.images.update(image.id, visibility='public')\n except Exception:\n logger.exception(\"Updating (-> public) Glance image '%s' [%s] failed\", image.name, image.id)\n return 1\n\n return 0", "def test_logged_in_not_friend(self):\n\n # log in\n self.make_logged_in_not_friend()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PUBLIC)", "def test_api_thumbnail_retrieve_by_consumer_site_any_role(self):\n consumer_site_access = ConsumerSiteAccessFactory(\n consumer_site=self.some_video.playlist.consumer_site,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n consumer_site_access.user, self.some_thumbnail\n )", "def 
test_detail_is_hacker_permission(self):\n self.user_1.username = 'pythonhacker'\n self.user_1.save()\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def test_user_visibility(app, resource):\n nethz = 'Something'\n with app.user(nethz=nethz):\n # Create fake signup with different nethz\n own = str(app.data.driver.db[resource].insert({'nethz': nethz}))\n other = str(app.data.driver.db[resource].insert({'nethz': 'trolo'}))\n\n # Resource: Can only see own, not both signups\n response = app.client.get('/' + resource, assert_status=200)\n assert len(response['_items']) == 1\n assert response['_items'][0]['nethz'] == nethz\n\n # Items\n own_url = '/%s/%s' % (resource, own)\n other_url = '/%s/%s' % (resource, other)\n\n # Get\n app.client.get(own_url, assert_status=200)\n app.client.get(other_url, assert_status=404)\n\n # Patch (if we can see item, we get 428 since etag is missing)\n app.client.patch(own_url, data={}, assert_status=428)\n app.client.patch(other_url, data={}, assert_status=404)\n\n # Delete (etag missing again)\n app.client.delete(own_url, assert_status=428)\n app.client.delete(other_url, assert_status=404)", "def test_api_thumbnail_read_detail_anonymous(self):\n video = VideoFactory()\n thumbnail = ThumbnailFactory(video=video)\n response = self.client.get(self._get_url(video, thumbnail))\n self.assertEqual(response.status_code, 401)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"Authentication credentials were not provided.\"}\n )", "def test_document_publicly_accessible(self):\n document = DocumentFactory(\n id=\"301b5f4f-b9f1-4a5f-897d-f8f1bf22c396\",\n playlist__title=\"playlist-003\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n is_public=True,\n title=\"document-001\",\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n uploaded_on=\"2019-09-24 07:24:40+00\",\n )\n\n response = self.client.get(f\"/documents/{document.pk}\")\n\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"<html>\")\n content = response.content.decode(\"utf-8\")\n\n match = re.search(\n '<div id=\"marsha-frontend-data\" data-context=\"(.*)\">', content\n )\n\n context = json.loads(unescape(match.group(1)))\n jwt_token = AccessToken(context.get(\"jwt\"))\n\n self.assertEqual(\n jwt_token.payload[\"permissions\"],\n {\"can_access_dashboard\": False, \"can_update\": False},\n )\n self.assertEqual(context.get(\"state\"), \"success\")\n self.assertEqual(\n context.get(\"resource\"),\n {\n \"active_stamp\": \"1569309880\",\n \"is_ready_to_show\": True,\n \"show_download\": True,\n \"id\": str(document.id),\n \"upload_state\": document.upload_state,\n \"title\": document.title,\n \"extension\": None,\n \"filename\": \"playlist-003_document-001\",\n \"playlist\": {\n \"id\": str(document.playlist.id),\n \"title\": \"playlist-003\",\n \"lti_id\": \"course-v1:ufr+mathematics+00001\",\n },\n \"url\": (\n \"https://abc.cloudfront.net/301b5f4f-b9f1-4a5f-897d-f8f1bf22c396\"\n \"/document/1569309880?response-content-disposition=attachment%3B\"\n \"+filename%3Dplaylist-003_document-001\"\n ),\n },\n )\n self.assertEqual(context.get(\"modelName\"), \"documents\")\n 
self.assertIsNone(context.get(\"context_id\"))", "def test_admin_api_organization_accesses_request_authenticated(self):\n user = factories.UserFactory(is_staff=False, is_superuser=False)\n self.client.login(username=user.username, password=\"password\")\n organization = factories.OrganizationFactory()\n response = self.client.get(\n f\"/api/v1.0/admin/organizations/{organization.id}/accesses/\"\n )\n\n self.assertContains(\n response,\n \"You do not have permission to perform this action.\",\n status_code=403,\n )", "def testimage_handler(self):\n\t\t\n\t\tthings = Thing.objects.all()\n\t\tif len( things ):\n\t\t\tthing = things[0]\n\t\telse:\n\t\t\tc = Client()\n\t\t\tdata = parse_qs( 'title=&tags=&lattitude=32.82248&longitude=-96.762986&duration=&parent=&privacy=U&lifespan=&format=txt' )\n\t\t\tdata[ 'media' ] = open( MEDIA_ROOT + 'unittest_image.jpg' )\n\t\t\tc.post( '/api/place/', data )\n\t\t\t\n\t\t\tthing = Thing.objects.all()[0]\n\n\t\t\n\t\turi = thing.media.replace( 'http://' + DOMAIN, '' )\n\t\t\n\t\tc = Client()\n\t\tresponse = c.get( uri )\n\t\tself.failUnlessEqual(response.status_code, 200)", "def test_unauthenticated_get(self):\n url = reverse('edit-list')\n\n response = self.client.get(url)\n self.assertEqual(403, response.status_code)\n self.assertEqual('Forbidden', response.status_text)\n self.assertTrue(\n 'credentials were not provided.' in response.data.get('detail'))", "def allowed_to_preview(user):\n if (\n user.is_authenticated and\n user.is_active and\n user.is_staff\n ):\n return True\n return False", "def test_admin_view_access(request_ctx):\n user = User.get(email=\"root@test0.edu\")\n with request_ctx(\"/org_invitatin_summary\") as ctx:\n login_user(user, remember=True)\n rv = ctx.app.full_dispatch_request()\n assert rv.status_code == 200\n assert b\"<!DOCTYPE html>\" in rv.data, \"Expected HTML content\"\n assert b\"Organisation Invitation Summary\" in rv.data\n assert b\"root@test0.edu\" in rv.data", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_empty_public(self):\n self.do_visible(True, None, True, is_admin=True)", "def is_authorized(self, request, obj=None):\r\n if request.method == 'GET':\r\n return True\r\n else:\r\n return False", "def test_allowed_if_superuser(self):\n\n @task_or_superuser_only\n def view(request):\n return HttpResponse(\"Hello\")\n\n class User(object):\n is_superuser = True\n is_authenticated = True\n\n request = self.factory.get(\"/\")\n request.user = None\n response = view(request)\n self.assertEqual(response.status_code, 403)\n\n request.user = User()\n response = view(request)\n self.assertEqual(response.status_code, 200)", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.global_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in templates))", "def is_authenticated(self):\n return True", 
"def test_should_render_with_user_in_read_only(self) -> None:\n self.request.user = User.objects.get(username='doc')\n\n # Turning on read-only mode prevents creation of some objects so call\n # _create_request_context first.\n request_context = self._create_request_context(user=self.request.user)\n\n settings = {\n 'site_read_only': True,\n }\n\n with override_feature_check(unified_banner_feature.feature_id, False):\n with self.siteconfig_settings(settings):\n if getattr(self, 'read_only_always_show', False):\n self.assertTrue(\n self.action.should_render(context=request_context))\n else:\n self.assertFalse(\n self.action.should_render(context=request_context))", "def test_read_unauthenticated(self):\n\n url = reverse('file', kwargs={'file_id': str(self.file.id)})\n\n data = {}\n\n response = self.client.get(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def submit_image_no_login(self):\n\n result = self.client.get(\"/submit_image\", follow_redirects=True)\n self.assertIn(b\"Password\", result.data)", "def test_should_render_with_authenticated(self) -> None:\n with override_feature_check(unified_banner_feature.feature_id, False):\n self.assertTrue(self.action.should_render(\n context=self._create_request_context(\n user=User.objects.get(username='doc'))))", "def test_should_render_with_authenticated(self) -> None:\n with override_feature_check(unified_banner_feature.feature_id, False):\n self.assertTrue(self.action.should_render(\n context=self._create_request_context(\n user=User.objects.get(username='doc'))))", "def test_read_unauthorized(self):\n\n url = reverse('file', kwargs={'file_id': str(self.file.id)})\n\n data = {}\n\n self.client.force_authenticate(user=self.test_user2_obj)\n response = self.client.get(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "async def test___init__(image_config: ImageConfig, image_config_signed: ImageConfig):\n assert image_config\n assert image_config_signed", "def test_home_as_user(self):\n self.client.login(username=\"test_user_1\", password=\"test\")\n response = self.client.get(\"/images/contents/\")\n self.assertJson(\n response,\n {\n \"files\": [],\n \"folders\": [],\n \"name\": \"\",\n \"path\": \"/\",\n },\n )", "def test_04_admin_featured_apps_as_anonymous(self):\r\n res = self.app.get('/admin/featured', follow_redirects=True)\r\n assert \"Please sign in to access this page\" in res.data, res.data", "def test_empty_private(self):\n self.do_visible(True, None, False, is_admin=True)", "def test_url_is_accessible_to_externals(self):\n\n res = self.client.get(self.url)\n self.assertEqual(res.status_code, 200)\n\n self.user.is_external = False\n self.user.save()\n res = self.client.get(self.url)\n self.assertEqual(res.status_code, 404)", "def test_has_object_read_permission_private(\n mock_parent_permission, api_rf, profile_factory\n):\n profile = profile_factory(is_private=True)\n request = api_rf.get(\"/\")\n\n expected = mock_parent_permission.return_value\n\n assert profile.has_object_read_permission(request) == expected\n assert mock_parent_permission.call_count == 1\n assert mock_parent_permission.call_args[0] == (request,)", "def test_user_avatar_serving(self):\n User = get_user_model()\n test_user = User.objects.create_user('Bob', 'bob@bob.com', 'pass123',\n set_default_avatar=True)\n\n avatar_url = reverse('misago:user-avatar', kwargs={\n 'pk': test_user.pk,\n 'hash': test_user.avatar_hash,\n 'size': 150,\n })\n response = self.client.get(avatar_url)\n\n 
self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], 'image/png')", "def test_retrieval_of_user_photos(self):\t\n\t\tget_response = self.client.get(reverse('photos'))\n\n\t\tself.assertEqual(get_response.status_code, status.HTTP_200_OK)\n\t\tdata = [i.values() for i in get_response.data]\n\t\tself.assertIn(u'{}'.format(self.image_name), data[0])", "def test_only_edit_perm(self):\n self.assertStatusCode(self.url, 403)", "def test_empty_public_owned(self):\n self.do_visible(True, 'pattieblack', True, is_admin=True)", "def test_get_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )", "def test_if_forbiden_for_authenticated_permissions(self):\r\n res = self.client_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def test_pull_public_acl(self):\n # Use defaults for format, arch, os, ostcount, replication\n pr = {\n 'system': self.system,\n 'itype': self.itype,\n 'tag': self.tag,\n 'remotetype': 'dockerv2',\n 'userACL': [1001, 1002],\n 'groupACL': [1003, 1004]\n }\n # Do the pull\n session = self.m.new_session(self.auth, self.system)\n rec = self.m.pull(session, pr) # ,delay=False)\n id = rec['_id']\n self.assertIsNotNone(rec)\n # Confirm record\n q = {'system': self.system, 'itype': self.itype,\n 'pulltag': self.tag}\n state = self.time_wait(id)\n mrec = self.images.find_one(q)\n self.assertIn('_id', mrec)\n self.assertIn('userACL', mrec)\n self.assertIn('ENV', mrec)\n # Track through transistions\n state = self.time_wait(id)\n self.assertEqual(state, 'READY')\n mrec = self.images.find_one(q)\n self.assertIn('ENV', mrec)\n self.assertIn('private', mrec)\n self.assertFalse(mrec['private'])", "def get_public_images_for_user(username):\n\n user = get_user_by_username(username)\n images = user.images\n public_images = [img for img in images if img.permission.value == \"PUBLIC\"]\n return public_images" ]
[ "0.7295332", "0.69605845", "0.69604754", "0.67785037", "0.66477525", "0.6625932", "0.65665686", "0.6512257", "0.65008277", "0.65008277", "0.6442916", "0.62628233", "0.62376827", "0.6148696", "0.6145751", "0.6145588", "0.61448425", "0.61161464", "0.6112119", "0.60931945", "0.60678583", "0.60609406", "0.6039262", "0.60218835", "0.6008595", "0.5973793", "0.5971059", "0.59643036", "0.59339625", "0.5924705", "0.58842534", "0.58698183", "0.58428276", "0.58373016", "0.58268356", "0.58100086", "0.57893425", "0.57800645", "0.57708025", "0.57595265", "0.5739561", "0.5735132", "0.5695507", "0.5694168", "0.56831133", "0.5678118", "0.56724846", "0.56722164", "0.5661081", "0.5659259", "0.5655255", "0.5653095", "0.5650035", "0.56470037", "0.56408244", "0.56387526", "0.56364834", "0.56352764", "0.56284624", "0.561993", "0.5619672", "0.561877", "0.561551", "0.5603739", "0.55973023", "0.55953574", "0.55783886", "0.5570637", "0.55674523", "0.5544763", "0.5538145", "0.5534881", "0.5533675", "0.5533675", "0.55326045", "0.55312675", "0.54990506", "0.54973805", "0.5495561", "0.5487254", "0.5483181", "0.54765546", "0.54757553", "0.54756534", "0.54756534", "0.5472234", "0.5464121", "0.545819", "0.545702", "0.545199", "0.5450066", "0.54469836", "0.54455847", "0.5441883", "0.54381424", "0.54380184", "0.5436803", "0.5434223", "0.5429193", "0.5414154" ]
0.66329974
5
Tests that an authenticated context (with is_admin set to False) cannot access an image (which it does not own) with is_public set to False.
def test_auth_private_unowned(self): self.do_visible(False, 'pattieblack', False, tenant='froggy')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_no_images_unauthorized(self):\n res = self.client.get(IMAGE_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def test_un_logged_in_user_can_not_upload_picture(self):\n tmp_file = generate_image_for_testing()\n response = self.client.post(self.user_passport_url,\n data={'passport': tmp_file})\n\n self.assertEqual(403, response.status_code)", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_aws_service_api_private_images_get(self):\n pass", "def test_cant_create_image_unauthorized(self):\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (10, 10))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n res = self.client.post(IMAGE_URL, data={'image': ntf},\n format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_aws_service_api_private_image_get(self):\n pass", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def do_visible(self, exp_res, img_owner, img_public, **kwargs):\n\n img = FakeImage(img_owner, img_public)\n ctx = context.RequestContext(**kwargs)\n\n self.assertEqual(ctx.is_image_visible(img), exp_res)", "def test_get_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_not_logged_in(self):\n response = self.c.get(reverse(submit_point), {'lat': 34.0, 'lng': 45.3, 'zoom': 13})\n self.assertEqual(response.status_code, 403)", "def test_get_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )", "def test_image_no_login(self):\n\n result = self.client.get(\"/select_image\", follow_redirects=True)\n\n self.assertIn(b\"Password\", result.data)", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_auth_private(self):\n self.do_visible(True, None, False, tenant='froggy')", "def test_unauthenticated_get(self):\n url = reverse('edit-list')\n\n response = self.client.get(url)\n self.assertEqual(403, response.status_code)\n 
self.assertEqual('Forbidden', response.status_text)\n self.assertTrue(\n 'credentials were not provided.' in response.data.get('detail'))", "def test_not_logged_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_filter_public_permissions(self):\n data = {\"public\": \"view\"}\n check_public_permissions(data)\n\n data = {\"public\": \"edit\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"share\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"owner\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)", "def test_get_user_template_as_anonymous_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def testGetAccessDenied(self):\n self.runGet(None)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_read_unauthorized(self):\n\n url = reverse('file', kwargs={'file_id': str(self.file.id)})\n\n data = {}\n\n self.client.force_authenticate(user=self.test_user2_obj)\n response = self.client.get(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def noaccess(self):\n self.assertEqual(self.client.get(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.post(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.delete(self.ENDPOINT).status_code, 403)", "def test_delete_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_logged_in_not_friend(self):\n\n # log in\n self.make_logged_in_not_friend()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PUBLIC)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, 
channel.json_body[\"errcode\"])", "def test_detail_not_contributor_forbidden(self):\n self.login(self.user1)\n resp = self.client.get(self.get_url(self.c2.pk))\n self.assert403(resp)", "def test_locked_asset_not_logged_in(self):\r\n self.client.logout()\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def test_user_not_in_group_cannot_access(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url)", "def test_category_view_not_logged_in(testapp):\n from webtest.app import AppError\n with pytest.raises(AppError, message=\"403 Forbidden\"):\n testapp.get('/category/1')", "def media_image_remotely_accessible(self) -> bool:\n return True", "def test_logged_user_not_in_group_cannot_access(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n utils.test_cannot_access(self, self.url, expected_url)", "def test_no_permission(self):\n override_acl(self.user, {'can_use_private_threads': 0})\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"t use private threads\", status_code=403)", "def test_auth_required(self, api_client):\n res = api_client.get(PHOTO_URL)\n\n assert res.status_code == status.HTTP_401_UNAUTHORIZED", "def assert_cannot_view(obj):\n selenium_utils.open_url(obj.url)\n assert ui_utils.is_error_403()", "def test_public_status_page_remove_authorization_from_public_status_page(self):\n pass", "def test_no_permission(self):\n override_acl(self.user, {'can_use_private_threads': 0})\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"can't use private threads\", status_code=403)", "def test_document_not_publicly_accessible(self):\n document = DocumentFactory(\n id=\"301b5f4f-b9f1-4a5f-897d-f8f1bf22c396\",\n playlist__title=\"playlist-003\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n is_public=False,\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n uploaded_on=\"2019-09-24 07:24:40+00\",\n )\n\n response = self.client.get(f\"/documents/{document.pk}\")\n\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"<html>\")\n content = response.content.decode(\"utf-8\")\n\n match = re.search(\n '<div id=\"marsha-frontend-data\" data-context=\"(.*)\">', content\n )\n\n context = json.loads(unescape(match.group(1)))\n\n self.assertIsNone(context.get(\"resource\"))\n self.assertEqual(context.get(\"state\"), \"error\")\n self.assertEqual(context.get(\"modelName\"), \"documents\")", "def test_if_forbiden_for_authenticated_permissions(self):\r\n res = self.client_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.vendor_id)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_only_edit_perm(self):\n self.assertStatusCode(self.url, 403)", "def test_non_owner_authenticated_user_read_given_blogpost_hidden_app(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n app = AppFactory.create(hidden=1)\r\n user = UserFactory.create()\r\n blogpost = BlogpostFactory.create(app=app)\r\n\r\n assert self.mock_authenticated.id != app.owner.id\r\n assert_raises(Forbidden, getattr(require, 'blogpost').read, blogpost)", "def 
test_delete_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user1_template, request=mock_request\n )", "def test_unauthenticated(self):\n self.logout_user()\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"sign in to use private threads\", status_code=403)", "def test_delete_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_get_other_users_template_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )", "def test_upsert_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_locked_asset_not_registered(self):\r\n self.client.login(username=self.usr, password=self.pwd)\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def test_api_thumbnail_administrator_read_detail_in_read_only(self):\n thumbnail = ThumbnailFactory()\n\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=thumbnail.video.playlist,\n permissions__can_update=False,\n )\n\n response = self.client.get(\n self._get_url(thumbnail.video, thumbnail),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)", "def test_should_render_for_owner_unpublished(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context(\n public=False)))", "def test_read_unauthenticated(self):\n\n url = reverse('file', kwargs={'file_id': str(self.file.id)})\n\n data = {}\n\n response = self.client.get(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_get_owner_image(self):\n\n # user1 is owner of image_id 1\n # user2 is owner of image ids (2,3)\n for image_id in range(1, 4):\n url = reverse(self.url_name_one, args=(image_id,))\n if image_id == 1:\n self.client.force_authenticate(self.user1)\n else:\n self.client.force_authenticate(self.user2)\n\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n if image_id == 1:\n self.assertEqual(response.data[\"owner\"], \"user1\")\n else:\n self.assertEqual(response.data[\"owner\"], \"user2\")\n\n # user2 try to get image_id 1 which is owner user1\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_auth_private_owned(self):\n self.do_visible(True, 'pattieblack', False, 
tenant='pattieblack')", "def assert_user_cannot_retrieve_thumbnail(self, user, thumbnail):\n jwt_token = UserAccessTokenFactory(user=user)\n\n response = self.client.get(\n self._get_url(thumbnail.video, thumbnail),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)", "def test_get_non_owner(self):\n another_user = CustomUser(id=101, email='another_user@mail.com', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n self.client.login(email='another_user@mail.com', password='testpassword')\n\n url = reverse('route', kwargs={'way_id': self.route.way_id, 'route_id': self.route.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 403)", "def test_not_authenticated(self):\n response = self.client.get(telemetry_url)\n self.assertEqual(403, response.status_code)", "def test_tenant_secret_page_on_external_domain_not_be_accessible(self):\n response = self.client.get(\n self.secret_url, HTTP_HOST=self.other_domain.domain)\n self.assertEqual(response.status_code, 403)", "def test_detail_blocked_forbidden_even_if_contributor(self):\n self.login(self.user1)\n resp = self.client.get(self.get_url(self.c3.pk))\n self.assert404(resp)", "def test_show_host_not_exist(self):\n self.req.environ['cinder.context'].is_admin = True\n dest = 'dummydest'\n self.assertRaises(webob.exc.HTTPNotFound,\n self.controller.show,\n self.req, dest)", "def guest_access(func):\n def decorated(*_, **kwargs):\n public_profiles = current_app.config['USER_PUBLIC_PROFILES']\n if not public_profiles:\n if not current_user.is_authenticated:\n abort(401)\n elif current_user.id != kwargs['id']:\n abort(403)\n return func(**kwargs)\n\n return decorated", "def test_requires_privilege_no_such(self):\n @requires_privilege('bomboozle', domain='zizzle')\n def view(request, *args, **kwargs):\n pass\n\n requestor_role = arbitrary.role()\n request = HttpRequest()\n request.role = requestor_role\n with self.assertRaises(PermissionDenied):\n view(request)", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.sodar_uuid)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user, sequencer=self.hiseq2000.sodar_uuid)\n self.response_403()", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"DELETE\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_tenant_secret_page_on_root_domain_not_be_accessible(self):\n response = self.client.get(\n self.secret_url, HTTP_HOST=self.tenant_root_domain)\n self.assertEqual(response.status_code, 403)", "def test_non_owner_authenticated_user_read_blogposts_for_given_hidden_app(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n app = AppFactory.create(hidden=1)\r\n user = UserFactory.create()\r\n\r\n assert self.mock_authenticated.id != app.owner.id\r\n assert_raises(Forbidden, getattr(require, 'blogpost').read, app_id=app.id)", "def test_not_authenticated_non_public_course_with_other_username(self):\n self.client.logout()\n self.verify_response(403)", "def test_list_containers_with_non_authorized_user(self):\n\n test_auth_provider = self.os_operator.auth_provider\n # Get auth for the test user\n test_auth_provider.auth_data\n\n # Get fresh auth for test user and set it to next auth request for\n # account_client\n delattr(test_auth_provider, 'auth_data')\n 
test_auth_new_data = test_auth_provider.auth_data\n self.account_client.auth_provider.set_alt_auth_data(\n request_part='headers',\n auth_data=test_auth_new_data\n )\n\n params = {'format': 'json'}\n # list containers with non-authorized user token\n self.assertRaises(lib_exc.Forbidden,\n self.account_client.list_account_containers,\n params=params)", "def test_get_reusableitem_api_not_public(self):\n\n # user not logged in\n self.client.logout()\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n # user logged in and created the Reusable Item\n self.client.force_authenticate(user=self.user_1)\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # user logged in and did not create the Reusable Item\n self.client.logout()\n self.client.force_authenticate(user=self.user_2)\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_admin_public(self):\n with self.login(self.user_admin):\n self.assertTrue(current_user.is_authenticated)\n self.assertEqual(current_user, self.user_admin)\n\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def test_delete_user_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user1_template, request=mock_request\n )", "def test_requires_privilege_no_current_role(self):\n @requires_privilege(self.zazzle_privilege.slug, domain='zizzle')\n def view(request, *args, **kwargs):\n pass\n\n request = HttpRequest()\n with self.assertRaises(PermissionDenied):\n view(request)", "def test_unauthenticated_user_denial(self):\n\n self.response = self.client.get(\"/api/users/users_list/\")\n self.assertEqual(self.response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n 'Authentication credentials were not provided.', self.response.data['detail'])", "def test_api_thumbnail_retrieve_by_random_user(self):\n user = UserFactory()\n\n self.assert_user_cannot_retrieve_thumbnail(user, self.some_thumbnail)", "def test_api_thumbnail_retrieve_by_organization_administrator(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=ADMINISTRATOR,\n )\n\n self.assert_user_can_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_not_authenticated_uri(self):\n request = self.factory.get(self.uri)\n response = self.view(request)\n response.render()\n self.assertEqual(response.status_code, 401,\n 'Expected Response Code 401, received {0} instead.'\n .format(response.status_code))", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def testNoPermission(self):\n self.login_user(self.user)\n response = self.client.get(self.url, self.args)\n self.assertEqual(response.status_code, 302)", "def testUpdatePhotoForbidden(self):\n photo_id = self._UploadEpisodeWithPhoto()\n\n # Now, after creating the episode and photo with 'user' (self._cookie), try to update with 'user3' (self._cookie3).\n self.assertRaisesHttpError(403, self._tester.UpdatePhoto, self._cookie3, 
photo_id, caption='An Updated Caption',\n placemark={'iso_country_code': 'US', 'country': 'United States',\n 'state': 'NY', 'locality': 'New York', 'sublocality': 'NoHo',\n 'thoroughfare': 'Broadway', 'subthoroughfare': '682'})", "def test_not_logged_in(self):\n self.request.user = None\n result = user_id_put_view(self.request)['d']\n self.assertEqual(result, error_dict('api_errors', 'not authenticated for this request'))", "def submit_image_no_login(self):\n\n result = self.client.get(\"/submit_image\", follow_redirects=True)\n self.assertIn(b\"Password\", result.data)", "def test_upsert_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user1_template, request=mock_request\n )", "def check_for_no_privates(context):\n json_data = context.response.json()\n\n if \"component_analyses\" in json_data:\n vulnerabilities = json_data['component_analyses']['vulnerability']\n for v in vulnerabilities:\n assert \"cvss\" in v\n assert \"is_private\" in v\n assert \"vendor_cve_ids\" in v\n if v[\"is_private\"]:\n raise Exception(\"Private vulnerability found\")", "def assert_response_resource_not_accessible(self, response):\n self.assertEqual(response.status_code, 403)\n self.assertEqual(\n response.json(),\n {\"detail\": \"You do not have permission to perform this action.\"},\n )", "def test_not_logged_cannot_delete(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_auth_sharable_cannot_share(self):\n self.do_sharable(False, 'pattieblack', FakeMembership(False),\n tenant='froggy')", "def test_access_negative(self, api):\n self.builder.add_user(api.get_user())\n r1 = api.access_user(api.get_user(), False)\n access_false = self.builder.get_access(api.get_user())\n self.builder.del_user(api.get_user())\n assert access_false == 0\n assert r1.status_code == 200", "def test_get_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )", "def test_if_not_available_for_unauthorized(self):\r\n res = self.not_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)" ]
[ "0.72276163", "0.69970745", "0.6978492", "0.68080735", "0.6803085", "0.6803085", "0.67497104", "0.6643472", "0.6630605", "0.6568257", "0.65023047", "0.64936197", "0.6412895", "0.6403637", "0.6389285", "0.6367777", "0.6367777", "0.6348167", "0.6298395", "0.6289524", "0.6269371", "0.6262778", "0.6261456", "0.6211032", "0.62094027", "0.62049794", "0.62049794", "0.62049794", "0.62049794", "0.6189842", "0.618778", "0.617157", "0.61683536", "0.61683536", "0.61624414", "0.6134748", "0.6094513", "0.6078269", "0.60768706", "0.6063134", "0.6054045", "0.6039832", "0.60378665", "0.6026561", "0.6026255", "0.6022735", "0.60179156", "0.60146564", "0.60136193", "0.6011736", "0.6010748", "0.5999383", "0.5993405", "0.5990542", "0.5971448", "0.59604335", "0.5941797", "0.5931338", "0.5925389", "0.5922652", "0.5920036", "0.5919962", "0.591994", "0.5916757", "0.59067863", "0.59006655", "0.5883987", "0.58822274", "0.5854144", "0.5853549", "0.58501214", "0.5843006", "0.58380663", "0.5835101", "0.58336836", "0.5830434", "0.5828179", "0.5828008", "0.58249456", "0.58197385", "0.58149815", "0.5812152", "0.58041817", "0.5799639", "0.5792681", "0.5792681", "0.5792681", "0.5792681", "0.5792482", "0.5791099", "0.57910484", "0.5786889", "0.5775021", "0.5767854", "0.5764805", "0.5762294", "0.57586396", "0.5756566", "0.57542723", "0.57468605" ]
0.6672808
7
Tests that an authenticated context (with is_admin set to False) can access an image (which it does own) with is_public set to False.
def test_auth_private_owned(self): self.do_visible(True, 'pattieblack', False, tenant='pattieblack')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_visible(self, exp_res, img_owner, img_public, **kwargs):\n\n img = FakeImage(img_owner, img_public)\n ctx = context.RequestContext(**kwargs)\n\n self.assertEqual(ctx.is_image_visible(img), exp_res)", "def test_aws_service_api_private_image_get(self):\n pass", "def test_aws_service_api_private_images_get(self):\n pass", "def test_aws_service_api_public_image_get(self):\n pass", "def test_admin_public(self):\n with self.login(self.user_admin):\n self.assertTrue(current_user.is_authenticated)\n self.assertEqual(current_user, self.user_admin)\n\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def test_auth_private(self):\n self.do_visible(True, None, False, tenant='froggy')", "def test_no_images_unauthorized(self):\n res = self.client.get(IMAGE_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_api_thumbnail_retrieve_by_organization_administrator(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=ADMINISTRATOR,\n )\n\n self.assert_user_can_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_aws_service_api_public_images_get(self):\n pass", "def test_auth_public(self):\n self.do_visible(True, None, True, tenant='froggy')", "def test_get_owner_image(self):\n\n # user1 is owner of image_id 1\n # user2 is owner of image ids (2,3)\n for image_id in range(1, 4):\n url = reverse(self.url_name_one, args=(image_id,))\n if image_id == 1:\n self.client.force_authenticate(self.user1)\n else:\n self.client.force_authenticate(self.user2)\n\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n if image_id == 1:\n self.assertEqual(response.data[\"owner\"], \"user1\")\n else:\n self.assertEqual(response.data[\"owner\"], \"user2\")\n\n # user2 try to get image_id 1 which is owner user1\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_auth_public_owned(self):\n self.do_visible(True, 'pattieblack', True, tenant='pattieblack')", "def media_image_remotely_accessible(self) -> bool:\n return True", "def test_public_user(self):\n set_permission(Permission.SHARE, self.user1, self.collection)\n\n data = {\"public\": \"view\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"none\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"edit\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"share\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"owner\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def 
test_logged_in_owner(self):\n self.make_logged_in_owner()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u, album.display_photo, ALBUM_PRIVATE)", "def test_image_no_login(self):\n\n result = self.client.get(\"/select_image\", follow_redirects=True)\n\n self.assertIn(b\"Password\", result.data)", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def test_image_display(self):\n\n result = self.client.get(\"/select_image\")\n\n self.assertIn(b\"/static/uploads/girl-glowing-skin-blue-eyes.jpg\", result.data)", "def test_permissions(self):\n taxonomy = self.get_taxonomy()\n return True if self.request.user == taxonomy.author else taxonomy.public", "def test_api_thumbnail_administrator_read_detail_in_read_only(self):\n thumbnail = ThumbnailFactory()\n\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=thumbnail.video.playlist,\n permissions__can_update=False,\n )\n\n response = self.client.get(\n self._get_url(thumbnail.video, thumbnail),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)", "def do_sharable(self, exp_res, img_owner, membership=None, **kwargs):\n\n img = FakeImage(img_owner, True)\n ctx = context.RequestContext(**kwargs)\n\n sharable_args = {}\n if membership is not None:\n sharable_args['membership'] = membership\n\n self.assertEqual(ctx.is_image_sharable(img, **sharable_args), exp_res)", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def test_auth_required(self, api_client):\n res = api_client.get(PHOTO_URL)\n\n assert res.status_code == status.HTTP_401_UNAUTHORIZED", "def test_filter_public_permissions(self):\n data = {\"public\": \"view\"}\n check_public_permissions(data)\n\n data = {\"public\": \"edit\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"share\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"owner\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_public_status_page_add_authorization_to_public_status_page(self):\n pass", "def testImageHandling(self):\n \n pm = getToolByName(self.portal, 'portal_membership')\n #make sure the person's member portrait isn't defined\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n # Delete the (nonexistant) image, make sure the portrait stays undefined\n 
self.person.setImage('DELETE_IMAGE')\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n self.person.setImage(TEST_GIF, content_type=\"image/gif\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_GIF)\n # Try to get a 10x10 version of the image\n imageOfSizeTag = self.person.getImageOfSize(10, 10)\n self.failUnlessEqual(imageOfSizeTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"10\" width=\"10\" />')\n self.failUnlessEqual(pm.getPersonalPortrait('abc123').__name__, 'abc123')\n \n # Try to get a scaled-by-ratio image with a width of 100.\n scaledImageTag = self.person.getScaledImageByWidth(100)\n self.failUnlessEqual(scaledImageTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"150\" width=\"100\" />')\n \n # Delete the image, make sure the portrait is deleted as well\n self.person.setImage('DELETE_IMAGE')\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n #self.person.setImage(TEST_JPEG, content_type=\"image/jpeg\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_JPEG)\n \n self.person.setImage(TEST_TIFF, content_type=\"image/tiff\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_TIFF)\n # Try to get a 10x10 version of the image\n imageOfSizeTag = self.person.getImageOfSize(10, 10)\n self.failUnlessEqual(imageOfSizeTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"10\" width=\"10\" />')\n \n # Try to get a scaled-by-ratio image with a width of 100.\n # TIFF handling in Plone is broken (probably the fault of PIL), handle the problem nicely.\n scaledImageTag = self.person.getScaledImageByWidth(100)\n self.failUnless(scaledImageTag == '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"150\" width=\"100\" />' or scaledImageTag == '')", "def test_api_thumbnail_retrieve_by_organization_student(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=STUDENT,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_cant_create_image_unauthorized(self):\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (10, 10))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n res = self.client.post(IMAGE_URL, data={'image': ntf},\n format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_get_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_photos_limited_to_user(self, api_client, test_user):\n\n user2 = get_user_model().objects.create_user(\n 'otheruser@company.com',\n 'password123'\n )\n sample_photo(user=user2)\n sample_photo(user=test_user)\n\n api_client.force_authenticate(test_user)\n res = api_client.get(PHOTO_URL)\n\n photos = Photo.objects.filter(user=test_user)\n serializer = PhotoSerializer(photos, many=True)\n\n assert res.status_code == status.HTTP_200_OK\n assert len(res.data) == 1\n assert res.data == serializer.data", "def test_anonymous_public(self):\n rv = 
self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def guest_access(func):\n def decorated(*_, **kwargs):\n public_profiles = current_app.config['USER_PUBLIC_PROFILES']\n if not public_profiles:\n if not current_user.is_authenticated:\n abort(401)\n elif current_user.id != kwargs['id']:\n abort(403)\n return func(**kwargs)\n\n return decorated", "def test_un_logged_in_user_can_not_upload_picture(self):\n tmp_file = generate_image_for_testing()\n response = self.client.post(self.user_passport_url,\n data={'passport': tmp_file})\n\n self.assertEqual(403, response.status_code)", "def test_api_thumbnail_retrieve_by_playlist_admin(self):\n playlist_access = PlaylistAccessFactory(\n playlist=self.some_video.playlist,\n role=ADMINISTRATOR,\n )\n\n self.assert_user_can_retrieve_thumbnail(\n playlist_access.user, self.some_thumbnail\n )", "def test_access_is_password_protected(self):\n\n # Directly create an Upload object in the DB.\n upload = create_url_upload(\"https://google.com/robots.txt\")\n response = self.client.get(reverse(\"upload-access\", args=[upload.id]))\n\n # Ensure request does not error, no special code expected here.\n self.assertEqual(response.status_code, 200)\n\n self.assertContains(response, \"Password\")", "def test_api_thumbnail_instructor_read_detail_in_read_only(self):\n thumbnail = ThumbnailFactory()\n\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=thumbnail.video.playlist,\n permissions__can_update=False,\n )\n\n response = self.client.get(\n self._get_url(thumbnail.video, thumbnail),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)", "def test_admin_accessible(self) -> None:\n response = self.client.get(\"/admin/\")\n self.assertEqual(200, response.status_code)", "def test_api_thumbnail_retrieve_by_organization_instructor(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=INSTRUCTOR,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_logged_in_not_friend(self):\n\n # log in\n self.make_logged_in_not_friend()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PUBLIC)", "def test_checkread(self):\n user1 = {'uid': 1, 'gid': 1}\n self.assertTrue(self.m._checkread(user1, {}))\n mock_image = {\n 'userACL': None,\n 'groupACL': None\n }\n # Test a public image with ACLs set to None\n self.assertTrue(self.m._checkread(user1, mock_image))\n # Now empty list instead of None. 
Treat it the same way.\n mock_image['userACL'] = []\n mock_image['groupACL'] = []\n self.assertTrue(self.m._checkread(user1, mock_image))\n self.assertTrue(self.m._checkread(user1, {'private': False}))\n # Private false should trump other things\n self.assertTrue(self.m._checkread(user1,\n {'private': False, 'userACL': [2]}))\n self.assertTrue(self.m._checkread(user1,\n {'private': False, 'groupACL': [2]}))\n # Now check a protected image that the user should\n # have access to\n mock_image['userACL'] = [1]\n self.assertTrue(self.m._checkread(user1, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 2, 'gid': 1}, mock_image))\n # Now check by groupACL\n mock_image['groupACL'] = [1]\n self.assertTrue(self.m._checkread({'uid': 3, 'gid': 1}, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 3, 'gid': 2}, mock_image))\n # What about an image with a list\n mock_image = {\n 'userACL': [1, 2, 3],\n 'groupACL': [4, 5, 6]\n }\n self.assertTrue(self.m._checkread(user1, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 7, 'gid': 7}, mock_image))", "def test_get_photos(self):\n recipe = Recipes.objects.create(chef=self.user, draft=False, private=False)\n photo = Photos.objects.create(recipe=recipe, photo_order=1)\n\n url = '/0/chefs/%i/photos' % self.user.pk\n\n resp = self.client.get(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('photos', resp.data)\n self.assertEqual(1, len(resp.data['photos']))\n keys = ('edit_date', 'creation_date', 'id', u'temperature', 'url', 'recipe', 'cover',\n 'time', 'instructions', 'order', 'quantity')\n self.assertEqual(set(keys), set(resp.data['photos'][0].keys()))", "def glance_update_and_set_public(glance, image, image_info):\n image_properties = image_info['image_properties']\n try:\n logger.debug(\"glance image update: properties=%s\", image_properties)\n glance.images.update(image.id, **image_properties)\n logger.debug(\"glance image update: visibility=public\")\n glance.images.update(image.id, visibility='public')\n except Exception:\n logger.exception(\"Updating (-> public) Glance image '%s' [%s] failed\", image.name, image.id)\n return 1\n\n return 0", "def test_private(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_public\": False, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"private\"})", "def test_public_status_page_remove_authorization_from_public_status_page(self):\n pass", "def test_aws_service_api_image_get(self):\n pass", "def testimage_handler(self):\n\t\t\n\t\tthings = Thing.objects.all()\n\t\tif len( things ):\n\t\t\tthing = things[0]\n\t\telse:\n\t\t\tc = Client()\n\t\t\tdata = parse_qs( 'title=&tags=&lattitude=32.82248&longitude=-96.762986&duration=&parent=&privacy=U&lifespan=&format=txt' )\n\t\t\tdata[ 'media' ] = open( MEDIA_ROOT + 'unittest_image.jpg' )\n\t\t\tc.post( '/api/place/', data )\n\t\t\t\n\t\t\tthing = Thing.objects.all()[0]\n\n\t\t\n\t\turi = thing.media.replace( 'http://' + DOMAIN, '' )\n\t\t\n\t\tc = Client()\n\t\tresponse = c.get( uri )\n\t\tself.failUnlessEqual(response.status_code, 200)", "def test_user_visibility(app, resource):\n nethz = 'Something'\n with app.user(nethz=nethz):\n # Create fake signup with different nethz\n own = str(app.data.driver.db[resource].insert({'nethz': nethz}))\n other = str(app.data.driver.db[resource].insert({'nethz': 'trolo'}))\n\n # Resource: 
Can only see own, not both signups\n response = app.client.get('/' + resource, assert_status=200)\n assert len(response['_items']) == 1\n assert response['_items'][0]['nethz'] == nethz\n\n # Items\n own_url = '/%s/%s' % (resource, own)\n other_url = '/%s/%s' % (resource, other)\n\n # Get\n app.client.get(own_url, assert_status=200)\n app.client.get(other_url, assert_status=404)\n\n # Patch (if we can see item, we get 428 since etag is missing)\n app.client.patch(own_url, data={}, assert_status=428)\n app.client.patch(other_url, data={}, assert_status=404)\n\n # Delete (etag missing again)\n app.client.delete(own_url, assert_status=428)\n app.client.delete(other_url, assert_status=404)", "def test_api_thumbnail_retrieve_by_consumer_site_any_role(self):\n consumer_site_access = ConsumerSiteAccessFactory(\n consumer_site=self.some_video.playlist.consumer_site,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n consumer_site_access.user, self.some_thumbnail\n )", "def test_logged_in_contributor(self):\n self.make_logged_in_contributor()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PRIVATE)", "def test_detail_is_hacker_permission(self):\n self.user_1.username = 'pythonhacker'\n self.user_1.save()\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)", "def test_api_thumbnail_read_detail_admin_user(self):\n video = VideoFactory(\n uploaded_on=datetime(2018, 8, 8, tzinfo=timezone.utc), upload_state=\"ready\"\n )\n thumbnail = ThumbnailFactory(video=video, upload_state=\"pending\")\n\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=video.playlist,\n roles=[\"administrator\"],\n )\n\n response = self.client.get(\n self._get_url(video, thumbnail),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content)\n\n self.assertEqual(\n content,\n {\n \"id\": str(thumbnail.id),\n \"active_stamp\": None,\n \"is_ready_to_show\": False,\n \"upload_state\": \"pending\",\n \"urls\": None,\n \"video\": str(video.id),\n },\n )", "def test_api_thumbnail_retrieve_by_random_user(self):\n user = UserFactory()\n\n self.assert_user_cannot_retrieve_thumbnail(user, self.some_thumbnail)", "def test_authorization(self):\n res = self.get(url=\"/products/1/pricehistory\")\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnauthorizedAccess)\n res = self.get(url=\"/products/1/pricehistory\", role=\"user\")\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnauthorizedAccess)", "def test_anything_else_is_accessible(api_client):\n\n assert api_client().get(\"/anything/else\").status_code == 200", "def test_has_role_public(self):\n self.project.set_public()\n self.assertFalse(self.category.has_role(self.user_bob))\n 
self.assertTrue(self.project.has_role(self.user_bob))", "def test_document_not_publicly_accessible(self):\n document = DocumentFactory(\n id=\"301b5f4f-b9f1-4a5f-897d-f8f1bf22c396\",\n playlist__title=\"playlist-003\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n is_public=False,\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n uploaded_on=\"2019-09-24 07:24:40+00\",\n )\n\n response = self.client.get(f\"/documents/{document.pk}\")\n\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"<html>\")\n content = response.content.decode(\"utf-8\")\n\n match = re.search(\n '<div id=\"marsha-frontend-data\" data-context=\"(.*)\">', content\n )\n\n context = json.loads(unescape(match.group(1)))\n\n self.assertIsNone(context.get(\"resource\"))\n self.assertEqual(context.get(\"state\"), \"error\")\n self.assertEqual(context.get(\"modelName\"), \"documents\")", "def test_public(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_public\": True, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"public\"})", "def test_should_render_with_authenticated(self) -> None:\n with override_feature_check(unified_banner_feature.feature_id, False):\n self.request.user = User.objects.get(username='doc')\n self.assertTrue(\n self.action.should_render(\n context=self._create_request_context(\n User.objects.get(username='doc'))))", "def test_logged_user_can_access(self):\n\n utils.create_user_and_authenticate(self)\n utils.test_can_access(self, self.url)", "def test_empty_public_owned(self):\n self.do_visible(True, 'pattieblack', True, is_admin=True)", "def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_admin_api_organization_accesses_request_authenticated(self):\n user = factories.UserFactory(is_staff=False, is_superuser=False)\n self.client.login(username=user.username, password=\"password\")\n organization = factories.OrganizationFactory()\n response = self.client.get(\n f\"/api/v1.0/admin/organizations/{organization.id}/accesses/\"\n )\n\n self.assertContains(\n response,\n \"You do not have permission to perform this action.\",\n status_code=403,\n )", "def test_has_object_read_permission_private(\n mock_parent_permission, api_rf, profile_factory\n):\n profile = profile_factory(is_private=True)\n request = api_rf.get(\"/\")\n\n expected = mock_parent_permission.return_value\n\n assert profile.has_object_read_permission(request) == expected\n assert mock_parent_permission.call_count == 1\n assert mock_parent_permission.call_args[0] == (request,)", "def test_get_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )", "def test_pull_public_acl(self):\n # Use defaults for format, arch, os, ostcount, replication\n pr = {\n 'system': self.system,\n 'itype': self.itype,\n 'tag': self.tag,\n 'remotetype': 'dockerv2',\n 'userACL': [1001, 1002],\n 'groupACL': [1003, 1004]\n }\n # Do the pull\n session = self.m.new_session(self.auth, self.system)\n rec = 
self.m.pull(session, pr) # ,delay=False)\n id = rec['_id']\n self.assertIsNotNone(rec)\n # Confirm record\n q = {'system': self.system, 'itype': self.itype,\n 'pulltag': self.tag}\n state = self.time_wait(id)\n mrec = self.images.find_one(q)\n self.assertIn('_id', mrec)\n self.assertIn('userACL', mrec)\n self.assertIn('ENV', mrec)\n # Track through transistions\n state = self.time_wait(id)\n self.assertEqual(state, 'READY')\n mrec = self.images.find_one(q)\n self.assertIn('ENV', mrec)\n self.assertIn('private', mrec)\n self.assertFalse(mrec['private'])", "def submit_image_no_login(self):\n\n result = self.client.get(\"/submit_image\", follow_redirects=True)\n self.assertIn(b\"Password\", result.data)", "def test_photo_classification_view_set_get_public(self):\n # Test data\n user = account_models.User.objects.get(email='mrtest@mypapaya.io')\n\n photo_models.PhotoClassification.objects.create_or_update(name='City', public=False)\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n request = client.get('/api/photo_classifications')\n results = request.data['results']\n\n self.assertEquals(len(results), 13)", "def test_read_unauthorized(self):\n\n url = reverse('file', kwargs={'file_id': str(self.file.id)})\n\n data = {}\n\n self.client.force_authenticate(user=self.test_user2_obj)\n response = self.client.get(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_api_thumbnail_read_detail_anonymous(self):\n video = VideoFactory()\n thumbnail = ThumbnailFactory(video=video)\n response = self.client.get(self._get_url(video, thumbnail))\n self.assertEqual(response.status_code, 401)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"Authentication credentials were not provided.\"}\n )", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_returns_global(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.global_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 1)\n self.assertTrue((template.user is None for template in templates))", "def is_authorized(self, request, obj=None):\r\n if request.method == 'GET':\r\n return True\r\n else:\r\n return False", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_public(client, url):\n response = client.get(url, secure=True)\n assert response.status_code == 200", "def test_unauthenticated_get(self):\n url = reverse('edit-list')\n\n response = self.client.get(url)\n self.assertEqual(403, response.status_code)\n self.assertEqual('Forbidden', response.status_text)\n self.assertTrue(\n 'credentials were not provided.' 
in response.data.get('detail'))", "def test_edit_image_instance(self):\n self.client.force_authenticate(self.user1)\n data = {\n \"img_name\": \"photo_user1\",\n \"img_description\": \"photo of user1\",\n \"favourite\": True,\n \"width\": 700,\n \"height\": 500,\n \"share_user\": [],\n }\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.put(url, data, format=\"multipart\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # Get edited object, convert to dict and compare with inputs\n obj = model_to_dict(Images.objects.get(id=1))\n for field, edited_data in data.items():\n self.assertEqual(edited_data, obj[field])\n # Check if image was edited to a new input\n edited_img = Image.open(self.test_pic_folder + \"/test.png\")\n self.assertEqual(edited_img.size, (700, 500))", "def test_should_render_with_user_in_read_only(self) -> None:\n self.request.user = User.objects.get(username='doc')\n\n # Turning on read-only mode prevents creation of some objects so call\n # _create_request_context first.\n request_context = self._create_request_context(user=self.request.user)\n\n settings = {\n 'site_read_only': True,\n }\n\n with override_feature_check(unified_banner_feature.feature_id, False):\n with self.siteconfig_settings(settings):\n if getattr(self, 'read_only_always_show', False):\n self.assertTrue(\n self.action.should_render(context=request_context))\n else:\n self.assertFalse(\n self.action.should_render(context=request_context))", "def test_url_is_accessible_to_externals(self):\n\n res = self.client.get(self.url)\n self.assertEqual(res.status_code, 200)\n\n self.user.is_external = False\n self.user.save()\n res = self.client.get(self.url)\n self.assertEqual(res.status_code, 404)", "def test_get_user_template_as_anonymous_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_retrieval_of_user_photos(self):\t\n\t\tget_response = self.client.get(reverse('photos'))\n\n\t\tself.assertEqual(get_response.status_code, status.HTTP_200_OK)\n\t\tdata = [i.values() for i in get_response.data]\n\t\tself.assertIn(u'{}'.format(self.image_name), data[0])", "def test_empty_public(self):\n self.do_visible(True, None, True, is_admin=True)", "def test_allowed_if_superuser(self):\n\n @task_or_superuser_only\n def view(request):\n return HttpResponse(\"Hello\")\n\n class User(object):\n is_superuser = True\n is_authenticated = True\n\n request = self.factory.get(\"/\")\n request.user = None\n response = view(request)\n self.assertEqual(response.status_code, 403)\n\n request.user = User()\n response = view(request)\n self.assertEqual(response.status_code, 200)", "def test_non_owner_authenticated_user_read_given_blogpost_hidden_app(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n app = AppFactory.create(hidden=1)\r\n user = UserFactory.create()\r\n blogpost = BlogpostFactory.create(app=app)\r\n\r\n assert self.mock_authenticated.id != app.owner.id\r\n assert_raises(Forbidden, getattr(require, 'blogpost').read, blogpost)", "def test_anon_public_owned(self):\n self.do_visible(True, 'pattieblack', True)", "def test_should_render_for_owner_unpublished(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context(\n public=False)))", "def check_is_admin(context):\n init()\n\n #the target is user-self\n credentials = context.to_dict()\n target = 
credentials\n\n return policy.check('context_is_admin', target, credentials)", "def is_authorized(self, request, obj=None):\r\n return True", "def test_read_unauthenticated(self):\n\n url = reverse('file', kwargs={'file_id': str(self.file.id)})\n\n data = {}\n\n response = self.client.get(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_empty_private_owned(self):\n self.do_visible(True, 'pattieblack', False, is_admin=True)", "def test_empty_private(self):\n self.do_visible(True, None, False, is_admin=True)", "def test_list_image(self):\n pass", "def test_document_publicly_accessible(self):\n document = DocumentFactory(\n id=\"301b5f4f-b9f1-4a5f-897d-f8f1bf22c396\",\n playlist__title=\"playlist-003\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n is_public=True,\n title=\"document-001\",\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n uploaded_on=\"2019-09-24 07:24:40+00\",\n )\n\n response = self.client.get(f\"/documents/{document.pk}\")\n\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"<html>\")\n content = response.content.decode(\"utf-8\")\n\n match = re.search(\n '<div id=\"marsha-frontend-data\" data-context=\"(.*)\">', content\n )\n\n context = json.loads(unescape(match.group(1)))\n jwt_token = AccessToken(context.get(\"jwt\"))\n\n self.assertEqual(\n jwt_token.payload[\"permissions\"],\n {\"can_access_dashboard\": False, \"can_update\": False},\n )\n self.assertEqual(context.get(\"state\"), \"success\")\n self.assertEqual(\n context.get(\"resource\"),\n {\n \"active_stamp\": \"1569309880\",\n \"is_ready_to_show\": True,\n \"show_download\": True,\n \"id\": str(document.id),\n \"upload_state\": document.upload_state,\n \"title\": document.title,\n \"extension\": None,\n \"filename\": \"playlist-003_document-001\",\n \"playlist\": {\n \"id\": str(document.playlist.id),\n \"title\": \"playlist-003\",\n \"lti_id\": \"course-v1:ufr+mathematics+00001\",\n },\n \"url\": (\n \"https://abc.cloudfront.net/301b5f4f-b9f1-4a5f-897d-f8f1bf22c396\"\n \"/document/1569309880?response-content-disposition=attachment%3B\"\n \"+filename%3Dplaylist-003_document-001\"\n ),\n },\n )\n self.assertEqual(context.get(\"modelName\"), \"documents\")\n self.assertIsNone(context.get(\"context_id\"))", "def test_should_render_with_authenticated(self) -> None:\n with override_feature_check(unified_banner_feature.feature_id, False):\n self.assertTrue(self.action.should_render(\n context=self._create_request_context(\n user=User.objects.get(username='doc'))))", "def test_should_render_with_authenticated(self) -> None:\n with override_feature_check(unified_banner_feature.feature_id, False):\n self.assertTrue(self.action.should_render(\n context=self._create_request_context(\n user=User.objects.get(username='doc'))))", "def test_only_edit_perm(self):\n self.assertStatusCode(self.url, 403)" ]
[ "0.72539985", "0.70253825", "0.7010216", "0.66228914", "0.65394044", "0.652322", "0.649416", "0.649245", "0.649245", "0.6490199", "0.6474233", "0.64500856", "0.64078796", "0.6296748", "0.6282635", "0.6253098", "0.6213612", "0.61675084", "0.61526585", "0.60946745", "0.6082156", "0.60719025", "0.60697377", "0.6060685", "0.6052906", "0.6052088", "0.6029411", "0.6021228", "0.5978759", "0.5924279", "0.59127575", "0.5911984", "0.5785717", "0.57850695", "0.5778379", "0.57703716", "0.57665867", "0.5756602", "0.57523507", "0.57510763", "0.5750919", "0.57450074", "0.5722675", "0.57194644", "0.5713587", "0.5703303", "0.5699166", "0.56897044", "0.5682187", "0.56724834", "0.56717384", "0.56675386", "0.56654495", "0.56650347", "0.56614584", "0.5656874", "0.56186074", "0.5610477", "0.5606357", "0.5605055", "0.5597251", "0.55859756", "0.55857927", "0.5585302", "0.55807745", "0.5572201", "0.5572163", "0.55659384", "0.5562999", "0.55527765", "0.5545993", "0.55414367", "0.5534327", "0.5527507", "0.5524385", "0.5518323", "0.5508214", "0.5508214", "0.5503071", "0.54966587", "0.5494702", "0.5493161", "0.5491679", "0.5489377", "0.5471644", "0.5467266", "0.5463829", "0.5462222", "0.544784", "0.54452497", "0.5442545", "0.54388475", "0.54361355", "0.5435112", "0.5433732", "0.54324824", "0.54310524", "0.54294235", "0.54294235", "0.5427496" ]
0.61762923
17
Tests that an authenticated context (with is_admin set to False) cannot share an image it neither owns nor is shared with it.
def test_auth_sharable(self): self.do_sharable(False, 'pattieblack', None, tenant='froggy')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_auth_sharable_cannot_share(self):\n self.do_sharable(False, 'pattieblack', FakeMembership(False),\n tenant='froggy')", "def cant_share_photo(request, ttl=None,*args, **kwargs):\n\tif ttl:\n\t\ttry:\n\t\t\tttl = int(ttl)\n\t\texcept ValueError:\n\t\t\tttl = None\n\tphoto_id = request.session.get(\"personal_group_shared_photo_id\",None)\n\torigin = request.session.get(\"personal_group_shared_photo_origin\",None)\n\tphoto_url = request.session.get(\"personal_group_shared_photo_url\",None)\n\tphoto_caption = request.session.get(\"personal_group_shared_photo_caption\",None)\n\tphoto_owner_username = request.session.get(\"personal_group_shared_photo_owner_username\",None)\n\treturn render(request,\"personal_group/sharing/photo_not_shared.html\",{'photo_caption':photo_caption,'photo_id':photo_id,'photo_url':photo_url,\\\n\t\t'photo_owner_username':photo_owner_username,'origin':origin,'ttl':ttl})", "def test_un_logged_in_user_can_not_upload_picture(self):\n tmp_file = generate_image_for_testing()\n response = self.client.post(self.user_passport_url,\n data={'passport': tmp_file})\n\n self.assertEqual(403, response.status_code)", "def test_user_without_share(self):\n set_permission(Permission.EDIT, self.user1, self.collection)\n\n # Can not add permissions to users.\n data = {\"users\": {self.user2.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n # Can not add permissions to groups.\n data = {\"users\": {self.group.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def ensure_share(self, context, share, share_server=None):\n pass", "def ensure_share(self, context, share, share_server=None):\r\n LOG.debug(\"Ensure share.\")", "def test_no_images_unauthorized(self):\n res = self.client.get(IMAGE_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_locked_asset_not_logged_in(self):\r\n self.client.logout()\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def canShare(self):\n return False", "def test_cant_create_image_unauthorized(self):\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (10, 10))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n res = self.client.post(IMAGE_URL, data={'image': ntf},\n format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_locked_asset_not_registered(self):\r\n self.client.login(username=self.usr, password=self.pwd)\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def test_auth_sharable_can_share(self):\n self.do_sharable(True, 'pattieblack', FakeMembership(True),\n 
tenant='froggy')", "def deny_access(self, context, share, access, share_server=None):\n self._get_helper(share).deny_access('/', share, access)", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_protect_owner(self):\n self.collection.set_permission(Permission.SHARE, self.user1)\n\n # User with share permission cannot grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertNotIn(\"owner\", self.collection.get_permissions(self.user2))\n self.assertFalse(PermissionModel.objects.filter(user=self.user2).exists())\n\n # User with share permission cannot revoke ``owner`` permission\n self.collection.set_permission(Permission.OWNER, self.user2)\n data = {\"users\": {self.user2.pk: \"editor\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n self.collection.set_permission(Permission.NONE, self.user2)\n\n # Now let user1 be owner on collection.\n set_permission(Permission.OWNER, self.user1, self.collection)\n\n # ``owner`` permission cannot be assigned to a group\n data = {\"groups\": {self.group.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertFalse(PermissionModel.objects.filter(group=self.group).exists())\n\n # User with owner permission can grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n\n # User with owner permission can revoke ``owner`` permission\n data = {\"users\": {self.user2.pk: \"edit\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(\n PermissionModel.objects.filter(\n user=self.user2, value=Permission.OWNER.value\n ).exists()\n )\n\n # User with owner permission cannot remove all owners\n data = {\"users\": {self.user1.pk: \"edit\", self.owner.pk: \"edit\"}}\n\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(resp.data[\"detail\"], \"Object must have at least one owner.\")\n\n owner_permissions = self.collection.permission_group.permissions.filter(\n value=Permission.OWNER.value\n )\n owner_count = owner_permissions.count()\n self.assertEqual(owner_count, 2)\n\n # User can delete his owner permission if there is at least one other owner\n self.assertTrue(owner_permissions.filter(user=self.user1).exists())\n data = {\"users\": {self.user1.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(owner_permissions.filter(user=self.user1.pk).exists())", "def deny_access(self, context, share, access, share_server=None):\r\n LOG.debug(\"Deny access.\")\r\n self.helper._deny_access(share['name'], access, 
share['share_proto'])", "def test_detail_not_contributor_forbidden(self):\n self.login(self.user1)\n resp = self.client.get(self.get_url(self.c2.pk))\n self.assert403(resp)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def every_non_existing_owner_should_not_have_profile_image_link(context):\n items = context.response.json()['items']\n for item in items:\n owner = item['owner']\n if not owner['user_type'] == 'does_not_exist':\n continue\n assert 'profile_image' not in owner\n logging.debug(\n 'Not existing Owner %s does not have a valid profile image link',\n owner['display_name'])", "def do_sharable(self, exp_res, img_owner, membership=None, **kwargs):\n\n img = FakeImage(img_owner, True)\n ctx = context.RequestContext(**kwargs)\n\n sharable_args = {}\n if membership is not None:\n sharable_args['membership'] = membership\n\n self.assertEqual(ctx.is_image_sharable(img, **sharable_args), exp_res)", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def testGetAccessDenied(self):\n self.runGet(None)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_logged_in_not_friend(self):\n\n # log in\n self.make_logged_in_not_friend()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PUBLIC)", "def media_image_remotely_accessible(self) -> bool:\n return True", "def test_empty_shared(self):\n self.do_sharable(False, 'pattieblack', None, is_admin=True)\n self.do_sharable(False, 'pattieblack', FakeMembership(True),\n is_admin=True)", "def test_01_self_unshare_resource(self):\n holes = self.holes\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in holes.raccess.edit_users)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))\n dog.uaccess.unshare_resource_with_user(holes, dog)\n self.assertFalse(dog in holes.raccess.edit_users)\n self.assertFalse(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [], 
dog.uaccess.get_resource_unshare_users(holes)))", "def test_user_not_in_group_cannot_access(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url)", "def test_03_self_cannot_upgrade_resource(self):\n holes = self.holes\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.VIEW)\n self.assertFalse(dog in holes.raccess.edit_users)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_resource_with_user(\n holes, dog, PrivilegeCodes.VIEW)\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_resource_with_user(\n holes, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))", "def test_auth_sharable_owned(self):\n self.do_sharable(True, 'pattieblack', None, tenant='pattieblack')", "def test_user_not_in_group_cannot_update(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def test_should_render_for_owner_unpublished(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context(\n public=False)))", "def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def testPostAccessDenied(self):\n self.runPost(None, data=self.post_data)\n self.response_401()\n for user in (self.guest, self.norole, self.unrelated_owner):\n self.runPost(user, data=self.post_data)\n self.response_403()", "def test_get_other_users_template_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.vendor_id)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def noaccess(self):\n self.assertEqual(self.client.get(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.post(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.delete(self.ENDPOINT).status_code, 403)", "def test_anon_shared(self):\n self.do_sharable(False, 'pattieblack', None)\n self.do_sharable(False, 'pattieblack', FakeMembership(True))", "def test_share_inactive_user(self):\n george = self.george\n alva = self.alva\n john = self.john\n bikes = self.bikes\n\n self.assertEqual(\n bikes.raccess.get_effective_privilege(alva),\n PrivilegeCodes.NONE)\n\n # inactive users can't be granted access\n # set john to an inactive user\n john.is_active = False\n john.save()\n\n with self.assertRaises(PermissionDenied):\n george.uaccess.share_resource_with_user(\n bikes, john, PrivilegeCodes.CHANGE)\n\n john.is_active = True\n john.save()\n\n # inactive grantor can't grant access\n # let's first grant John access privilege\n george.uaccess.share_resource_with_user(\n bikes, john, PrivilegeCodes.CHANGE)\n\n self.assertEqual(\n bikes.raccess.get_effective_privilege(john),\n PrivilegeCodes.CHANGE)\n\n john.is_active = False\n john.save()\n\n with 
self.assertRaises(PermissionDenied):\n john.uaccess.share_resource_with_user(\n bikes, alva, PrivilegeCodes.VIEW)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"DELETE\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_upsert_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_logged_user_not_in_group_cannot_access(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n utils.test_cannot_access(self, self.url, expected_url)", "def test_logged_in_friend_not_in_group(self):\n\n self.make_logged_in_friend_not_in_group()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_ALLFRIENDS)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_ALLFRIENDS)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_ALLFRIENDS)", "def test_kyc_post_legal_share_holder(self):\n pass", "def test_review_story_restrict_to_only_admin(self):\n self.client.post('/api/stories', headers={'token': user_token}, data=json.dumps(story1))\n res = self.client.put('/api/stories/1/review', headers={'token': user_token}, data=json.dumps({\n 'status': 'Approved'\n }))\n result = json.loads(res.data.decode())\n self.assertEqual(result['message'], 'Permission denied')\n self.assertEqual(res.status_code, 403)", "def test_owner_edit_assessment_invalid(self):\n req, resp = data.get_assessment(self.contract['id'])\n response = self.user_01.put(self.assessment_report_url, req)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_kyc_put_legal_share_holder(self):\n pass", "def _deny_access(self, context, share, access, share_server=None):\n if access['access_type'] != 'ip':\n LOG.debug('Quobyte driver only supports ip access control. 
'\n 'Ignoring deny access call for %s , %s',\n share['name'],\n self._get_project_name(context, share['project_id']))\n return\n\n volume_uuid = self._resolve_volume_name(share['name'],\n share['project_id'])\n call_params = {\n \"volume_uuid\": volume_uuid,\n \"remove_allow_ip\": access['access_to']}\n self.rpc.call('exportVolume', call_params)", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.sodar_uuid)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user, sequencer=self.hiseq2000.sodar_uuid)\n self.response_403()", "def test_not_logged_in(self):\n response = self.c.get(reverse(submit_point), {'lat': 34.0, 'lng': 45.3, 'zoom': 13})\n self.assertEqual(response.status_code, 403)", "def test_delete_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_no_permission(self):\n override_acl(self.user, {'can_use_private_threads': 0})\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"can't use private threads\", status_code=403)", "def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def test_no_permission(self):\n override_acl(self.user, {'can_use_private_threads': 0})\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"t use private threads\", status_code=403)", "def test_put_unauthorized(self):\n\n url = reverse('file')\n\n data = {\n 'shard_id': self.shard1.id,\n 'link_id': \"b8866161-0b1f-4a8e-acde-07047313ec8f\",\n 'parent_datastore_id': str(self.test_datastore_obj.id),\n 'chunk_count': 1,\n 'size': 512,\n }\n\n self.client.force_authenticate(user=self.test_user2_obj)\n response = self.client.put(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_tenant_secret_page_on_other_site_domain_not_be_accessible(self):\n response = self.client.get(\n self.secret_url, HTTP_HOST=self.other_site.domain)\n self.assertEqual(response.status_code, 403)", "def test_delete_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user2_template, request=mock_request\n )", "def test_share(self):\n\n # In the actual test, we'll want to confirm that an IP address\n # can be shared to a group\n\n # Try to share with the group--fails for now (operation not\n # implemented in nova); note: change 1 to group, '10.0.0.1' to IP\n dtutil.assert_raises(novaclient.OpenStackException,\n self.server.share_ip, 1, '10.0.0.1', True)", "def test_read_unauthorized(self):\n\n url = reverse('file', kwargs={'file_id': str(self.file.id)})\n\n data = {}\n\n self.client.force_authenticate(user=self.test_user2_obj)\n response = self.client.get(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_get_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n 
self.fixture.user1_template.id, request=mock_request\n )", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def assert_response_resource_not_accessible(self, response):\n self.assertEqual(response.status_code, 403)\n self.assertEqual(\n response.json(),\n {\"detail\": \"You do not have permission to perform this action.\"},\n )", "def test_not_permitted(self, default_store):\n course = self.create_course_with_orphans(default_store)\n orphan_url = reverse_course_url('orphan_handler', course.id)\n\n test_user_client, test_user = self.create_non_staff_authed_user_client()\n CourseEnrollment.enroll(test_user, course.id)\n response = test_user_client.get(orphan_url)\n self.assertEqual(response.status_code, 403)\n response = test_user_client.delete(orphan_url)\n self.assertEqual(response.status_code, 403)", "def test_util_has_perm_or_owns_sanity(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n perm = 'forums_forum.thread_edit_forum'\n allowed = access.has_perm_or_owns(me, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = access.has_perm_or_owns(me, perm, other_t, self.forum_1)\n eq_(allowed, False)", "def test_get_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )", "def test_upsert_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user1_template, request=mock_request\n )", "def test_attachment_deletion_allowed_no_instance(self, mock_get_server):\n attachment = self._get_attachment(with_instance_id=False)\n self.volume_api.attachment_deletion_allowed(self.context, attachment)\n mock_get_server.assert_not_called()", "def test_delete_global_template_as_user_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_image_no_login(self):\n\n result = self.client.get(\"/select_image\", follow_redirects=True)\n\n self.assertIn(b\"Password\", result.data)", "def test_upsert_global_template_as_user_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_unauthenticated(self):\n self.logout_user()\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"sign in to use private threads\", status_code=403)", "def test_api_thumbnail_retrieve_by_consumer_site_any_role(self):\n consumer_site_access = ConsumerSiteAccessFactory(\n consumer_site=self.some_video.playlist.consumer_site,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n consumer_site_access.user, self.some_thumbnail\n )", "def test_only_edit_perm(self):\n self.assertStatusCode(self.url, 403)", "def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, 
\"User %s does not own study %d\" %\n (user.id, study.id))", "def test_tenant_secret_page_on_external_domain_not_be_accessible(self):\n response = self.client.get(\n self.secret_url, HTTP_HOST=self.other_domain.domain)\n self.assertEqual(response.status_code, 403)", "def test_share_no_os_environ(self):\n with self.sys_exit_patch:\n with self.assertRaises(SystemExit):\n self.inst.share(\n \"test-container\",\n \"test-recipient\",\n \"r\",\n \"w\"\n )\n self.sys_exit_mock.assert_called_once()", "def test_mod_not_reported(self):\n override_acl(self.user, {'can_moderate_private_threads': 1})\n\n response = self.client.get(self.api_link)\n self.assertEqual(response.status_code, 404)", "def assert_cannot_view(obj):\n selenium_utils.open_url(obj.url)\n assert ui_utils.is_error_403()", "def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)", "def test_get_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )", "def test_upsert_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_upsert_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user2_template, request=mock_request\n )", "def allow_access(self, context, share, access, share_server=None):\r\n LOG.debug(\"Allow access.\")\r\n self.helper._allow_access(share['name'], access, share['share_proto'])", "def test_image_fail(self):\n client = Client()\n review1_id = Review.objects.get(content='TEST_CONTENT').id\n review2_id = Review.objects.get(content='TEST_CONTENT2').id\n review3_id = Review.objects.get(content='TEST_CONTENT3').id\n review4_id = Review.objects.get(content='TEST_CONTENT4').id\n no_review_id = review1_id + review2_id + review3_id + review4_id\n\n img_and_file = make_image_file()\n response = client.post('/api/review/'+str(review1_id)+'/image/',\n data={'image': img_and_file[1]})\n self.assertEqual(response.status_code, 401)\n client.login(username='TEST_USER_2',\n email='TEST_EMAIL_2', password='TEST_PW_2')\n response = client.post('/api/review/'+str(review1_id)+'/image/',\n data={'image': img_and_file[1]})\n self.assertEqual(response.status_code, 403)\n response = client.post('/api/review/'+str(no_review_id)+'/image/',\n data={'image': img_and_file[1]})\n self.assertEqual(response.status_code, 404)\n client.login(username='TEST_USER_1',\n email='TEST_EMAIL_1', password='TEST_PW_1')\n response = client.get('/api/review/'+str(review1_id)+'/image/')\n self.assertEqual(response.status_code, 405)\n response = client.post('/api/review/'+str(review1_id)+'/image/',\n data={'image': img_and_file[0].tobytes()})\n self.assertEqual(response.status_code, 400)", "def test_an_unauthenticated_user_cannot_like_article(self):\n article = self.create_article()\n\n slug = article.data['data']['slug']\n like = self.client.post('/api/articles/{}/like/'.format(slug),\n format='json')\n\n self.assertEqual(\n like.data['detail'], \"Authentication 
credentials were not provided.\")\n self.assertEqual(like.status_code, 401)", "def test_detail_blocked_forbidden_even_if_contributor(self):\n self.login(self.user1)\n resp = self.client.get(self.get_url(self.c3.pk))\n self.assert404(resp)", "def test_delete_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user2_template, request=mock_request\n )", "def test_thread_is_not_moderated(self):\n self.assertRaises(\n ObjectDoesNotExist,\n Thread.public.get_by_user,\n **{'thread_id': self.thread.pk, 'user': self.user}\n )\n self.group.private = False\n self.group.save()\n self.assertEqual(\n Thread.public.get_by_user(\n thread_id=self.thread.pk, user=self.user),\n self.thread\n )", "def test_user_not_in_group_cannot_update_tab(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url, self.data)", "def test_user_not_in_group_cannot_delete(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url)\n self.assertEqual(len(Group.objects.all()), 1)", "def testMissingImage(self):\n self.assertNotIn('no_image', self.data)", "def test_user_does_not_have_access(self):\n self.assertRaises(\n ObjectDoesNotExist,\n Thread.public.get_by_user,\n **{'thread_id': self.thread.pk, 'user': self.user}\n )", "def test_dashboards_v2_share(self):\n pass", "def test_an_unauthenticated_user_cannot_dislike_article(self):\n article = self.create_article()\n\n slug = article.data['data']['slug']\n dislike = self.client.post('/api/articles/{}/dislike/'.format(slug),\n format='json')\n\n self.assertEqual(\n dislike.data['detail'], \"Authentication credentials were not provided.\")\n self.assertEqual(dislike.status_code, 401)", "def test_destroy_not_owner(self):\n\n self.assertEqual(first=1, second=Post.objects.all().count())\n url = reverse('post-detail', args=(self.post.id,))\n self.client.credentials(HTTP_AUTHORIZATION=self.token_1)\n response = self.client.delete(path=url)\n self.assertEqual(first=403, second=response.status_code)\n self.assertEqual(first=1, second=Post.objects.all().count())", "def test_non_owner_authenticated_user_read_given_blogpost_hidden_app(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n app = AppFactory.create(hidden=1)\r\n user = UserFactory.create()\r\n blogpost = BlogpostFactory.create(app=app)\r\n\r\n assert self.mock_authenticated.id != app.owner.id\r\n assert_raises(Forbidden, getattr(require, 'blogpost').read, blogpost)" ]
[ "0.73269576", "0.66646326", "0.662856", "0.6424367", "0.6423024", "0.63863295", "0.62619466", "0.6225412", "0.62130284", "0.62111557", "0.62085885", "0.61964643", "0.61871755", "0.60659873", "0.6040665", "0.59974575", "0.59781015", "0.59628546", "0.5954561", "0.59475374", "0.59475374", "0.5924636", "0.5921192", "0.5917659", "0.5900683", "0.58979565", "0.5883806", "0.58443093", "0.584224", "0.5825702", "0.58178365", "0.5816217", "0.5796267", "0.5793036", "0.5791574", "0.5781296", "0.57399386", "0.5739251", "0.5733965", "0.57266945", "0.5716642", "0.5701909", "0.5697053", "0.5694394", "0.56727666", "0.56676066", "0.56423753", "0.5636154", "0.5622662", "0.56132555", "0.55967915", "0.5595689", "0.55932117", "0.55931246", "0.558325", "0.5580135", "0.55789787", "0.5577603", "0.5547487", "0.5540788", "0.5539854", "0.5537023", "0.5535248", "0.55298233", "0.55298233", "0.5527244", "0.55077994", "0.55053633", "0.5504062", "0.55036145", "0.5498118", "0.5488216", "0.5477599", "0.5473769", "0.5472306", "0.546846", "0.54622746", "0.54596835", "0.5457088", "0.5452832", "0.5452372", "0.54499435", "0.544908", "0.54468244", "0.5444355", "0.5441136", "0.5436374", "0.5429078", "0.54277796", "0.5421337", "0.5419499", "0.54185563", "0.541499", "0.5412057", "0.54115516", "0.5410084", "0.54078525", "0.54064107", "0.5385242", "0.5382084" ]
0.5450656
81
Tests that an authenticated context (with is_admin set to True) can share an image it neither owns nor is shared with it.
def test_auth_sharable_admin(self): self.do_sharable(True, 'pattieblack', None, tenant='froggy', is_admin=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_auth_sharable_cannot_share(self):\n self.do_sharable(False, 'pattieblack', FakeMembership(False),\n tenant='froggy')", "def ensure_share(self, context, share, share_server=None):\n pass", "def ensure_share(self, context, share, share_server=None):\r\n LOG.debug(\"Ensure share.\")", "def test_auth_sharable_can_share(self):\n self.do_sharable(True, 'pattieblack', FakeMembership(True),\n tenant='froggy')", "def canShare(self):\n return False", "def do_sharable(self, exp_res, img_owner, membership=None, **kwargs):\n\n img = FakeImage(img_owner, True)\n ctx = context.RequestContext(**kwargs)\n\n sharable_args = {}\n if membership is not None:\n sharable_args['membership'] = membership\n\n self.assertEqual(ctx.is_image_sharable(img, **sharable_args), exp_res)", "def test_user_without_share(self):\n set_permission(Permission.EDIT, self.user1, self.collection)\n\n # Can not add permissions to users.\n data = {\"users\": {self.user2.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n # Can not add permissions to groups.\n data = {\"users\": {self.group.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def cant_share_photo(request, ttl=None,*args, **kwargs):\n\tif ttl:\n\t\ttry:\n\t\t\tttl = int(ttl)\n\t\texcept ValueError:\n\t\t\tttl = None\n\tphoto_id = request.session.get(\"personal_group_shared_photo_id\",None)\n\torigin = request.session.get(\"personal_group_shared_photo_origin\",None)\n\tphoto_url = request.session.get(\"personal_group_shared_photo_url\",None)\n\tphoto_caption = request.session.get(\"personal_group_shared_photo_caption\",None)\n\tphoto_owner_username = request.session.get(\"personal_group_shared_photo_owner_username\",None)\n\treturn render(request,\"personal_group/sharing/photo_not_shared.html\",{'photo_caption':photo_caption,'photo_id':photo_id,'photo_url':photo_url,\\\n\t\t'photo_owner_username':photo_owner_username,'origin':origin,'ttl':ttl})", "def test_auth_sharable_owned(self):\n self.do_sharable(True, 'pattieblack', None, tenant='pattieblack')", "def media_image_remotely_accessible(self) -> bool:\n return True", "def test_protect_owner(self):\n self.collection.set_permission(Permission.SHARE, self.user1)\n\n # User with share permission cannot grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertNotIn(\"owner\", self.collection.get_permissions(self.user2))\n self.assertFalse(PermissionModel.objects.filter(user=self.user2).exists())\n\n # User with share permission cannot revoke ``owner`` permission\n self.collection.set_permission(Permission.OWNER, self.user2)\n data = {\"users\": {self.user2.pk: \"editor\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n self.collection.set_permission(Permission.NONE, self.user2)\n\n # Now let user1 be owner on collection.\n set_permission(Permission.OWNER, self.user1, self.collection)\n\n # ``owner`` permission cannot be assigned to a group\n data = {\"groups\": {self.group.pk: \"owner\"}}\n resp = 
self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertFalse(PermissionModel.objects.filter(group=self.group).exists())\n\n # User with owner permission can grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n\n # User with owner permission can revoke ``owner`` permission\n data = {\"users\": {self.user2.pk: \"edit\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(\n PermissionModel.objects.filter(\n user=self.user2, value=Permission.OWNER.value\n ).exists()\n )\n\n # User with owner permission cannot remove all owners\n data = {\"users\": {self.user1.pk: \"edit\", self.owner.pk: \"edit\"}}\n\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(resp.data[\"detail\"], \"Object must have at least one owner.\")\n\n owner_permissions = self.collection.permission_group.permissions.filter(\n value=Permission.OWNER.value\n )\n owner_count = owner_permissions.count()\n self.assertEqual(owner_count, 2)\n\n # User can delete his owner permission if there is at least one other owner\n self.assertTrue(owner_permissions.filter(user=self.user1).exists())\n data = {\"users\": {self.user1.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(owner_permissions.filter(user=self.user1.pk).exists())", "def test_empty_shared(self):\n self.do_sharable(False, 'pattieblack', None, is_admin=True)\n self.do_sharable(False, 'pattieblack', FakeMembership(True),\n is_admin=True)", "def cli(env, identifier, account_id):\n\n image_mgr = SoftLayer.ImageManager(env.client)\n image_id = helpers.resolve_id(image_mgr.resolve_ids, identifier, 'image')\n shared_image = image_mgr.share_image(image_id, account_id)\n\n if shared_image:\n env.fout(f\"Image template {identifier} was shared to account {account_id}.\")", "def test_anon_shared(self):\n self.do_sharable(False, 'pattieblack', None)\n self.do_sharable(False, 'pattieblack', FakeMembership(True))", "def allow_access(self, context, share, access, share_server=None):\r\n LOG.debug(\"Allow access.\")\r\n self.helper._allow_access(share['name'], access, share['share_proto'])", "def test_un_logged_in_user_can_not_upload_picture(self):\n tmp_file = generate_image_for_testing()\n response = self.client.post(self.user_passport_url,\n data={'passport': tmp_file})\n\n self.assertEqual(403, response.status_code)", "def test_public_user(self):\n set_permission(Permission.SHARE, self.user1, self.collection)\n\n data = {\"public\": \"view\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"none\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"edit\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n 
data = {\"public\": \"share\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"owner\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def test_dashboards_v2_share(self):\n pass", "def allow_access(self, context, share, access, share_server=None):\n self._get_helper(share).allow_access('/', share, access)", "def test_share(self):\n\n # In the actual test, we'll want to confirm that an IP address\n # can be shared to a group\n\n # Try to share with the group--fails for now (operation not\n # implemented in nova); note: change 1 to group, '10.0.0.1' to IP\n dtutil.assert_raises(novaclient.OpenStackException,\n self.server.share_ip, 1, '10.0.0.1', True)", "def test_01_self_unshare_resource(self):\n holes = self.holes\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in holes.raccess.edit_users)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))\n dog.uaccess.unshare_resource_with_user(holes, dog)\n self.assertFalse(dog in holes.raccess.edit_users)\n self.assertFalse(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [], dog.uaccess.get_resource_unshare_users(holes)))", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def test_no_images_unauthorized(self):\n res = self.client.get(IMAGE_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def deny_access(self, context, share, access, share_server=None):\n self._get_helper(share).deny_access('/', share, access)", "def test_cant_create_image_unauthorized(self):\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (10, 10))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n res = self.client.post(IMAGE_URL, data={'image': ntf},\n format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_logged_in_not_friend(self):\n\n # log in\n self.make_logged_in_not_friend()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PUBLIC)", "def test_locked_asset_not_logged_in(self):\r\n self.client.logout()\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def share():\n return True", "def test_kyc_put_legal_share_holder(self):\n pass", "def 
test_auth_sharable(self):\n self.do_sharable(False, 'pattieblack', None, tenant='froggy')", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def test_kyc_post_legal_share_holder(self):\n pass", "def test_share_inactive_user(self):\n george = self.george\n alva = self.alva\n john = self.john\n bikes = self.bikes\n\n self.assertEqual(\n bikes.raccess.get_effective_privilege(alva),\n PrivilegeCodes.NONE)\n\n # inactive users can't be granted access\n # set john to an inactive user\n john.is_active = False\n john.save()\n\n with self.assertRaises(PermissionDenied):\n george.uaccess.share_resource_with_user(\n bikes, john, PrivilegeCodes.CHANGE)\n\n john.is_active = True\n john.save()\n\n # inactive grantor can't grant access\n # let's first grant John access privilege\n george.uaccess.share_resource_with_user(\n bikes, john, PrivilegeCodes.CHANGE)\n\n self.assertEqual(\n bikes.raccess.get_effective_privilege(john),\n PrivilegeCodes.CHANGE)\n\n john.is_active = False\n john.save()\n\n with self.assertRaises(PermissionDenied):\n john.uaccess.share_resource_with_user(\n bikes, alva, PrivilegeCodes.VIEW)", "def deny_access(self, context, share, access, share_server=None):\r\n LOG.debug(\"Deny access.\")\r\n self.helper._deny_access(share['name'], access, share['share_proto'])", "def test_get_owner_image(self):\n\n # user1 is owner of image_id 1\n # user2 is owner of image ids (2,3)\n for image_id in range(1, 4):\n url = reverse(self.url_name_one, args=(image_id,))\n if image_id == 1:\n self.client.force_authenticate(self.user1)\n else:\n self.client.force_authenticate(self.user2)\n\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n if image_id == 1:\n self.assertEqual(response.data[\"owner\"], \"user1\")\n else:\n self.assertEqual(response.data[\"owner\"], \"user2\")\n\n # user2 try to get image_id 1 which is owner user1\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_locked_asset_not_registered(self):\r\n self.client.login(username=self.usr, password=self.pwd)\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def _share():\n context = get_factcheck_context()\n return make_response(render_template('share.html', **context))", "def test_review_story_restrict_to_only_admin(self):\n self.client.post('/api/stories', headers={'token': user_token}, data=json.dumps(story1))\n res = self.client.put('/api/stories/1/review', headers={'token': user_token}, data=json.dumps({\n 'status': 'Approved'\n }))\n result = json.loads(res.data.decode())\n self.assertEqual(result['message'], 'Permission denied')\n self.assertEqual(res.status_code, 403)", "def test_logged_in_owner(self):\n self.make_logged_in_owner()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u, album.display_photo, 
ALBUM_PRIVATE)", "def test_api_thumbnail_retrieve_by_consumer_site_any_role(self):\n consumer_site_access = ConsumerSiteAccessFactory(\n consumer_site=self.some_video.playlist.consumer_site,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n consumer_site_access.user, self.some_thumbnail\n )", "def test_share_no_os_environ(self):\n with self.sys_exit_patch:\n with self.assertRaises(SystemExit):\n self.inst.share(\n \"test-container\",\n \"test-recipient\",\n \"r\",\n \"w\"\n )\n self.sys_exit_mock.assert_called_once()", "def do_visible(self, exp_res, img_owner, img_public, **kwargs):\n\n img = FakeImage(img_owner, img_public)\n ctx = context.RequestContext(**kwargs)\n\n self.assertEqual(ctx.is_image_visible(img), exp_res)", "def test_should_render_for_owner_unpublished(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context(\n public=False)))", "def check_instance_shared_storage_remote(self, context, data):\n raise NotImplementedError()", "def test_03_self_cannot_upgrade_resource(self):\n holes = self.holes\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.VIEW)\n self.assertFalse(dog in holes.raccess.edit_users)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_resource_with_user(\n holes, dog, PrivilegeCodes.VIEW)\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_resource_with_user(\n holes, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_auth_private_owned(self):\n self.do_visible(True, 'pattieblack', False, tenant='pattieblack')", "def share(self, request):\n try:\n article = self.get_object()\n except PermissionDenied as pd:\n return Response({'error': str(pd)})\n\n article.shared_by.add(request.user)\n return Response({'message': '\"{}\" is shared'.format(article.title)})", "def test_detail_not_contributor_forbidden(self):\n self.login(self.user1)\n resp = self.client.get(self.get_url(self.c2.pk))\n self.assert403(resp)", "def test_put_unauthorized(self):\n\n url = reverse('file')\n\n data = {\n 'shard_id': self.shard1.id,\n 'link_id': \"b8866161-0b1f-4a8e-acde-07047313ec8f\",\n 'parent_datastore_id': str(self.test_datastore_obj.id),\n 'chunk_count': 1,\n 'size': 512,\n }\n\n self.client.force_authenticate(user=self.test_user2_obj)\n response = self.client.put(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def check_instance_shared_storage_local(self, context, instance):\n raise NotImplementedError()", "def test_aws_service_api_private_images_get(self):\n pass", "def require_share_exists(f):\n @wraps(f)\n def 
wrapper(context, share_id, *args, **kwargs):\n share_get(context, share_id)\n return f(context, share_id, *args, **kwargs)\n wrapper.__name__ = f.__name__\n return wrapper", "def test_aws_service_api_private_image_get(self):\n pass", "def test_test_nas_share(self):\n pass", "async def test_push_share_no_os_envars(self):\n with self.sys_exit_patch, \\\n self.patch_init_sharing_client_error, \\\n self.patch_get_address, \\\n self.os_environ_get_patch:\n with self.assertRaises(SystemExit):\n await self.inst._push_share(\n \"test-container\",\n \"test-recipient\",\n [\"r\", \"w\"]\n )", "def test_dashboards_v2_delete_share(self):\n pass", "def test_api_thumbnail_retrieve_by_organization_student(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=STUDENT,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_util_has_perm_or_owns_sanity(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n perm = 'forums_forum.thread_edit_forum'\n allowed = access.has_perm_or_owns(me, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = access.has_perm_or_owns(me, perm, other_t, self.forum_1)\n eq_(allowed, False)", "def _deny_access(self, context, share, access, share_server=None):\n if access['access_type'] != 'ip':\n LOG.debug('Quobyte driver only supports ip access control. '\n 'Ignoring deny access call for %s , %s',\n share['name'],\n self._get_project_name(context, share['project_id']))\n return\n\n volume_uuid = self._resolve_volume_name(share['name'],\n share['project_id'])\n call_params = {\n \"volume_uuid\": volume_uuid,\n \"remove_allow_ip\": access['access_to']}\n self.rpc.call('exportVolume', call_params)", "def every_non_existing_owner_should_not_have_profile_image_link(context):\n items = context.response.json()['items']\n for item in items:\n owner = item['owner']\n if not owner['user_type'] == 'does_not_exist':\n continue\n assert 'profile_image' not in owner\n logging.debug(\n 'Not existing Owner %s does not have a valid profile image link',\n owner['display_name'])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_update_non_shareable(self):\n self.create_common_users_and_groups()\n sync = SyncUserAndGroups(\n tsurl=TS_URL,\n username=TS_USER,\n password=TS_PASSWORD,\n disable_ssl=True,\n )\n auag = sync.get_all_users_and_groups()\n\n # Need to remove the common users since they don't have emails.\n auag.remove_user(\"guest\")\n auag.remove_user(\"su\")\n auag.remove_user(\"system\")\n auag.remove_user(\"tsadmin\")\n\n # Change Group 1 and Group 2 and verify change took.\n group1 = auag.get_group(\"Group 1\")\n group1.visibility = Visibility.NON_SHAREABLE\n group2 = auag.get_group(\"Group 2\")\n group2.visibility = Visibility.DEFAULT\n\n # sync updates\n sync.sync_users_and_groups(users_and_groups=auag)\n\n # verify changes\n auag = sync.get_all_users_and_groups()\n self.assertEqual(\n 
auag.get_group(\"Group 1\").visibility, Visibility.NON_SHAREABLE\n )\n self.assertEqual(\n auag.get_group(\"Group 2\").visibility, Visibility.DEFAULT\n )\n self.assertEqual(\n auag.get_group('Group \"3\"').visibility, Visibility.NON_SHAREABLE\n )", "def test_new_share(self):\n \n test_user_with_checkpoint = self.create_saved_test_user_with_checkpoint()\n another_test_user_to_share = self.create_saved_test_user()\n \n data = {\"user_id\": test_user_with_checkpoint.user_obj.id,\n \"to_user_id\": another_test_user_to_share.user_obj.id,\n \"signature\": gen_signature(\"put\",\n \"share\",\n gen_api_key(test_user_with_checkpoint.user_obj.access_token, \n test_user_with_checkpoint.user_obj.id)),\n \"user_checkpoint_id\": test_user_with_checkpoint.user_checkpoint_obj.id\n }\n \n resp = self.client.put(\"/share/\", data=data)\n assert \"ok\" in resp.data\n assert not get_share_w_attr(test_user_with_checkpoint.user_obj, \n another_test_user_to_share.user_obj, \n test_user_with_checkpoint.user_checkpoint_obj) is None", "def _allow_access(self, context, share, access, share_server=None):\n if access['access_type'] != 'ip':\n raise exception.InvalidShareAccess(\n _('Quobyte driver only supports ip access control'))\n\n volume_uuid = self._resolve_volume_name(share['name'],\n share['project_id'])\n ro = access['access_level'] == (constants.ACCESS_LEVEL_RO)\n call_params = {\n \"volume_uuid\": volume_uuid,\n \"read_only\": ro,\n \"add_allow_ip\": access['access_to']}\n self.rpc.call('exportVolume', call_params)", "def test_image_no_login(self):\n\n result = self.client.get(\"/select_image\", follow_redirects=True)\n\n self.assertIn(b\"Password\", result.data)", "def test_upsert_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_logged_in_friend_not_in_group(self):\n\n self.make_logged_in_friend_not_in_group()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_ALLFRIENDS)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_ALLFRIENDS)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_ALLFRIENDS)", "def test_wrong_config_shares0(self):\n self.app.post_json(url=\"/config/shares\",\n params=dict(\n source='gsiftp://source',\n destination='gsiftp://nowhere',\n vo='dteam',\n share='dfdf'\n ),\n status=400\n )", "def ft_syndicate_access():\n \n fake_user = FakeObject()\n fake_user.email = \"fakeuser@opencloud.us\"\n\n print \"\\nensure_user_exists(%s)\\n\" % fake_user.email\n ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )\n\n print \"\\nensure_user_exists(%s)\\n\" % fake_user.email\n ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )\n\n fake_volume = FakeObject()\n fake_volume.name = \"fakevolume\"\n fake_volume.description = \"This is a fake volume, created for funtional testing\"\n fake_volume.blocksize = 1024\n fake_volume.cap_read_data = True \n fake_volume.cap_write_data = True \n fake_volume.cap_host_data = False\n fake_volume.archive = False\n fake_volume.private = True\n 
\n # test idempotency\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n \n print \"\\nensure_volume_access_right_exists(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )\n \n print \"\\nensure_volume_access_right_exists(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )\n \n print \"\\nensure_volume_access_right_absent(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_absent( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_access_right_absent(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_absent( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_user_absent(%s)\\n\" % fake_user.email\n ensure_user_absent( fake_user.email )\n\n print \"\\nensure_user_absent(%s)\\n\" % fake_user.email\n ensure_user_absent( fake_user.email )\n \n \n \n \n print \"\\nensure_principal_exists(%s)\\n\" % fake_user.email\n ensure_principal_exists( fake_user.email, \"asdf\", is_admin=False, max_UGs=1100, max_RGs=1 )\n \n print \"\\nensure_principal_exists(%s)\\n\" % fake_user.email\n ensure_principal_exists( fake_user.email, \"asdf\", is_admin=False, max_UGs=1100, max_RGs=1 )\n\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n\n print \"\\nsetup_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name)\n setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, \"abcdef\" )\n \n print \"\\nsetup_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name)\n setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, \"abcdef\" )\n \n print \"\\nteardown_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name )\n teardown_volume_access( fake_user.email, fake_volume.name )\n \n print \"\\nteardown_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name )\n teardown_volume_access( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_principal_absent(%s)\\n\" % fake_user.email\n ensure_principal_absent( fake_user.email )", "def test_share_01(self, mocker):\n contributors = ['alice@gmail.com']\n g = GSheets(self.fake.file_path(depth=1, category=None, extension='json'), contributors)\n g.spreadsheet = Spreadsheet(None, None)\n g.spreadsheet.share = mocker.MagicMock()\n\n g.share()\n\n assert not g.spreadsheet.share.called", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_get_other_users_template_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )", "def 
photo_shared(request):\n\tphoto_id = request.session.get(\"personal_group_shared_photo_id\",None)\n\torigin = request.session.get(\"personal_group_shared_photo_origin\",None)\n\tphoto_url = request.session.get(\"personal_group_shared_photo_url\",None)\n\tphoto_caption = request.session.get(\"personal_group_shared_photo_caption\",None)\n\tallwd_friends = request.session.get(\"personal_group_shared_photo_allwd_friends\",None)\n\tdisallwd_friends = request.session.get(\"personal_group_shared_photo_disallwd_friends\",None)\n\tphoto_owner_username = request.session.get(\"personal_group_shared_photo_owner_username\",None)\n\treturn render(request,\"personal_group/sharing/photo_shared.html\",{'allwd_friends':allwd_friends,'disallwd_friends':disallwd_friends,\\\n\t\t'own_uname':retrieve_uname(request.user.id,decode=True),'origin':origin,'photo_caption':photo_caption,'num_sent':len(allwd_friends),\\\n\t\t'num_unsent':len(disallwd_friends),'photo_url':photo_url,'photo_id':photo_id,'photo_owner_username':photo_owner_username})", "def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)", "def test_wrong_config_shares2(self):\n self.app.post_json(url=\"/config/shares\",\n params=dict(\n source='dfgsdfsg',\n destination='gsiftp://nowhere',\n vo='dteam',\n share=80\n ),\n status=400\n )\n\n self.app.post_json(url=\"/config/shares\",\n params=dict(\n source='gsiftp://source',\n destination='klhjkhjk',\n vo='dteam',\n share=80\n ),\n status=400\n )", "def test_04_self_unshare_group(self):\n meowers = self.meowers\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))\n dog.uaccess.unshare_group_with_user(meowers, dog)\n self.assertFalse(dog in meowers.gaccess.edit_users)\n self.assertFalse(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [], dog.uaccess.get_group_unshare_users(meowers)))", "def check_is_admin(context):\n init()\n\n #the target is user-self\n credentials = context.to_dict()\n target = credentials\n\n return policy.check('context_is_admin', target, credentials)", "def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, \"User %s does not own study %d\" %\n (user.id, study.id))", "def every_existing_owner_should_have_valid_profile_image_link(context):\n items = context.response.json()['items']\n for item in items:\n owner = item['owner']\n if owner['user_type'] == 'does_not_exist':\n continue\n link = owner['profile_image']\n assert validators.url(link), (\n 'Owner %s (%d) in item %d has an invalid profile image link: %s'\n .format(owner['display_name'], owner['user_id'], link))\n logging.debug(\n 'Owner %s (%d) has a valid profile image link: %s',\n owner['display_name'], owner['user_id'], link)", "def test_auth_public_owned(self):\n self.do_visible(True, 'pattieblack', True, tenant='pattieblack')", "def test_mount_status_nas_share_by_pool(self):\n pass", "def test_itar_restrict_asset(self):\n pass", "def test_auth_required(self, api_client):\n res = api_client.get(PHOTO_URL)\n\n assert res.status_code == status.HTTP_401_UNAUTHORIZED", "def test_test_result_nas_share(self):\n pass", "def require_share_instance_exists(f):\n @wraps(f)\n def wrapper(context, 
share_instance_id, *args, **kwargs):\n share_instance_get(context, share_instance_id)\n return f(context, share_instance_id, *args, **kwargs)\n wrapper.__name__ = f.__name__\n return wrapper", "def test_show_nas_share(self):\n pass", "def testGetAccessDenied(self):\n self.runGet(None)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_itar_restrict_test_asset(self):\n pass", "def protect_share_item(portal_item):\r\n # protect portal item from deletion\r\n portal_item.protect(enable=True)\r\n print('protecting portal item of', portal_item.title, 'from deletion')\r\n\r\n # share portal item in the organization\r\n portal_item.share('org')\r\n print('sharing', portal_item.title, 'in organization')", "def test_set_share(self):\n self.app.post_json(url=\"/config/shares\",\n params=dict(\n source='gsiftp://source',\n destination='gsiftp://nowhere',\n vo='dteam',\n share=80\n ),\n status=200\n )", "def test_share_38(self):\n with self.os_environ_get_patch, \\\n self.patch_push_share, \\\n self.patch_subprocess_call, \\\n self.sys_version_38_patch:\n self.inst.share(\n \"test-container\",\n \"test-recipient\",\n \"r\",\n \"w\"\n )\n self.subprocess_call_mock.assert_called_once()\n self.push_share_mock.assert_awaited_once()", "def share(config: Config, ami: str, account: str) -> None:\n\n ec2_client = boto3.client(\"ec2\", region_name=config.get(\"region\", None))\n\n ec2_client.modify_image_attribute(\n ImageId=ami,\n LaunchPermission={\"Add\": [{\"UserId\": account}]},\n OperationType=\"add\",\n UserIds=[account],\n Value=\"string\",\n DryRun=False,\n )", "def require_share_snapshot_exists(f):\n @wraps(f)\n def wrapper(context, share_snapshot_id, *args, **kwargs):\n share_snapshot_get(context, share_snapshot_id)\n return f(context, share_snapshot_id, *args, **kwargs)\n wrapper.__name__ = f.__name__\n return wrapper", "def test_upsert_global_template_as_user_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_attachment_deletion_allowed_no_instance(self, mock_get_server):\n attachment = self._get_attachment(with_instance_id=False)\n self.volume_api.attachment_deletion_allowed(self.context, attachment)\n mock_get_server.assert_not_called()" ]
[ "0.7128801", "0.6828019", "0.67746824", "0.67188835", "0.6670886", "0.64948475", "0.63549715", "0.63543147", "0.61875457", "0.6095076", "0.6056876", "0.60101897", "0.600731", "0.59870404", "0.59508693", "0.5939599", "0.5927158", "0.58969176", "0.58855337", "0.5838346", "0.58140796", "0.58089846", "0.57899266", "0.5745943", "0.5745672", "0.569453", "0.56840736", "0.5682218", "0.56606853", "0.5642707", "0.5632864", "0.56325877", "0.5631727", "0.5606759", "0.56063414", "0.5595998", "0.5583587", "0.5572959", "0.5569698", "0.5548858", "0.5543628", "0.55414516", "0.5537752", "0.5537719", "0.5515206", "0.5503514", "0.55010056", "0.5491497", "0.5485351", "0.54821754", "0.5476625", "0.5468488", "0.54537", "0.54431343", "0.5407889", "0.54065305", "0.5394256", "0.5387814", "0.5381039", "0.53690374", "0.53665495", "0.5359849", "0.53579736", "0.53523946", "0.53523946", "0.53501755", "0.5338136", "0.5336347", "0.5320852", "0.5319633", "0.5317661", "0.5317295", "0.53088814", "0.5306805", "0.53006524", "0.53006524", "0.5299432", "0.5293563", "0.5268558", "0.52660084", "0.5263944", "0.52554333", "0.5253629", "0.5251306", "0.5247375", "0.523389", "0.5233638", "0.5227983", "0.52259374", "0.5225063", "0.522274", "0.52165735", "0.52130157", "0.5203965", "0.51984215", "0.51864004", "0.51652765", "0.5161891", "0.5159977", "0.5157381" ]
0.55289805
44
Tests that an authenticated context (with is_admin set to False) can share an image it owns, even if it is not shared with it.
def test_auth_sharable_owned(self): self.do_sharable(True, 'pattieblack', None, tenant='pattieblack')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def canShare(self):\n return False", "def test_auth_sharable_can_share(self):\n self.do_sharable(True, 'pattieblack', FakeMembership(True),\n tenant='froggy')", "def do_sharable(self, exp_res, img_owner, membership=None, **kwargs):\n\n img = FakeImage(img_owner, True)\n ctx = context.RequestContext(**kwargs)\n\n sharable_args = {}\n if membership is not None:\n sharable_args['membership'] = membership\n\n self.assertEqual(ctx.is_image_sharable(img, **sharable_args), exp_res)", "def ensure_share(self, context, share, share_server=None):\n pass", "def test_auth_sharable_cannot_share(self):\n self.do_sharable(False, 'pattieblack', FakeMembership(False),\n tenant='froggy')", "def ensure_share(self, context, share, share_server=None):\r\n LOG.debug(\"Ensure share.\")", "def cli(env, identifier, account_id):\n\n image_mgr = SoftLayer.ImageManager(env.client)\n image_id = helpers.resolve_id(image_mgr.resolve_ids, identifier, 'image')\n shared_image = image_mgr.share_image(image_id, account_id)\n\n if shared_image:\n env.fout(f\"Image template {identifier} was shared to account {account_id}.\")", "def media_image_remotely_accessible(self) -> bool:\n return True", "def cant_share_photo(request, ttl=None,*args, **kwargs):\n\tif ttl:\n\t\ttry:\n\t\t\tttl = int(ttl)\n\t\texcept ValueError:\n\t\t\tttl = None\n\tphoto_id = request.session.get(\"personal_group_shared_photo_id\",None)\n\torigin = request.session.get(\"personal_group_shared_photo_origin\",None)\n\tphoto_url = request.session.get(\"personal_group_shared_photo_url\",None)\n\tphoto_caption = request.session.get(\"personal_group_shared_photo_caption\",None)\n\tphoto_owner_username = request.session.get(\"personal_group_shared_photo_owner_username\",None)\n\treturn render(request,\"personal_group/sharing/photo_not_shared.html\",{'photo_caption':photo_caption,'photo_id':photo_id,'photo_url':photo_url,\\\n\t\t'photo_owner_username':photo_owner_username,'origin':origin,'ttl':ttl})", "def test_protect_owner(self):\n self.collection.set_permission(Permission.SHARE, self.user1)\n\n # User with share permission cannot grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertNotIn(\"owner\", self.collection.get_permissions(self.user2))\n self.assertFalse(PermissionModel.objects.filter(user=self.user2).exists())\n\n # User with share permission cannot revoke ``owner`` permission\n self.collection.set_permission(Permission.OWNER, self.user2)\n data = {\"users\": {self.user2.pk: \"editor\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n self.collection.set_permission(Permission.NONE, self.user2)\n\n # Now let user1 be owner on collection.\n set_permission(Permission.OWNER, self.user1, self.collection)\n\n # ``owner`` permission cannot be assigned to a group\n data = {\"groups\": {self.group.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertFalse(PermissionModel.objects.filter(group=self.group).exists())\n\n # User with owner permission can grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = 
self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n\n # User with owner permission can revoke ``owner`` permission\n data = {\"users\": {self.user2.pk: \"edit\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(\n PermissionModel.objects.filter(\n user=self.user2, value=Permission.OWNER.value\n ).exists()\n )\n\n # User with owner permission cannot remove all owners\n data = {\"users\": {self.user1.pk: \"edit\", self.owner.pk: \"edit\"}}\n\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(resp.data[\"detail\"], \"Object must have at least one owner.\")\n\n owner_permissions = self.collection.permission_group.permissions.filter(\n value=Permission.OWNER.value\n )\n owner_count = owner_permissions.count()\n self.assertEqual(owner_count, 2)\n\n # User can delete his owner permission if there is at least one other owner\n self.assertTrue(owner_permissions.filter(user=self.user1).exists())\n data = {\"users\": {self.user1.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(owner_permissions.filter(user=self.user1.pk).exists())", "def test_public_user(self):\n set_permission(Permission.SHARE, self.user1, self.collection)\n\n data = {\"public\": \"view\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"none\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"edit\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"share\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"owner\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def test_get_owner_image(self):\n\n # user1 is owner of image_id 1\n # user2 is owner of image ids (2,3)\n for image_id in range(1, 4):\n url = reverse(self.url_name_one, args=(image_id,))\n if image_id == 1:\n self.client.force_authenticate(self.user1)\n else:\n self.client.force_authenticate(self.user2)\n\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n if image_id == 1:\n self.assertEqual(response.data[\"owner\"], \"user1\")\n else:\n self.assertEqual(response.data[\"owner\"], \"user2\")\n\n # user2 try to get image_id 1 which is owner user1\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_user_without_share(self):\n set_permission(Permission.EDIT, self.user1, self.collection)\n\n # Can not add permissions to users.\n data = {\"users\": {self.user2.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n 
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n # Can not add permissions to groups.\n data = {\"users\": {self.group.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def test_anon_shared(self):\n self.do_sharable(False, 'pattieblack', None)\n self.do_sharable(False, 'pattieblack', FakeMembership(True))", "def allow_access(self, context, share, access, share_server=None):\r\n LOG.debug(\"Allow access.\")\r\n self.helper._allow_access(share['name'], access, share['share_proto'])", "def share():\n return True", "def test_logged_in_owner(self):\n self.make_logged_in_owner()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u, album.display_photo, ALBUM_PRIVATE)", "def do_visible(self, exp_res, img_owner, img_public, **kwargs):\n\n img = FakeImage(img_owner, img_public)\n ctx = context.RequestContext(**kwargs)\n\n self.assertEqual(ctx.is_image_visible(img), exp_res)", "def test_share(self):\n\n # In the actual test, we'll want to confirm that an IP address\n # can be shared to a group\n\n # Try to share with the group--fails for now (operation not\n # implemented in nova); note: change 1 to group, '10.0.0.1' to IP\n dtutil.assert_raises(novaclient.OpenStackException,\n self.server.share_ip, 1, '10.0.0.1', True)", "def test_empty_shared(self):\n self.do_sharable(False, 'pattieblack', None, is_admin=True)\n self.do_sharable(False, 'pattieblack', FakeMembership(True),\n is_admin=True)", "def allow_access(self, context, share, access, share_server=None):\n self._get_helper(share).allow_access('/', share, access)", "def check_instance_shared_storage_remote(self, context, data):\n raise NotImplementedError()", "def test_dashboards_v2_share(self):\n pass", "def share(self, request):\n try:\n article = self.get_object()\n except PermissionDenied as pd:\n return Response({'error': str(pd)})\n\n article.shared_by.add(request.user)\n return Response({'message': '\"{}\" is shared'.format(article.title)})", "def test_auth_private_owned(self):\n self.do_visible(True, 'pattieblack', False, tenant='pattieblack')", "def test_01_self_unshare_resource(self):\n holes = self.holes\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in holes.raccess.edit_users)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))\n dog.uaccess.unshare_resource_with_user(holes, dog)\n self.assertFalse(dog in holes.raccess.edit_users)\n self.assertFalse(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [], dog.uaccess.get_resource_unshare_users(holes)))", "def _share():\n context = get_factcheck_context()\n return make_response(render_template('share.html', **context))", "def check_instance_shared_storage_local(self, context, instance):\n raise NotImplementedError()", "def photo_shared(request):\n\tphoto_id = request.session.get(\"personal_group_shared_photo_id\",None)\n\torigin = 
request.session.get(\"personal_group_shared_photo_origin\",None)\n\tphoto_url = request.session.get(\"personal_group_shared_photo_url\",None)\n\tphoto_caption = request.session.get(\"personal_group_shared_photo_caption\",None)\n\tallwd_friends = request.session.get(\"personal_group_shared_photo_allwd_friends\",None)\n\tdisallwd_friends = request.session.get(\"personal_group_shared_photo_disallwd_friends\",None)\n\tphoto_owner_username = request.session.get(\"personal_group_shared_photo_owner_username\",None)\n\treturn render(request,\"personal_group/sharing/photo_shared.html\",{'allwd_friends':allwd_friends,'disallwd_friends':disallwd_friends,\\\n\t\t'own_uname':retrieve_uname(request.user.id,decode=True),'origin':origin,'photo_caption':photo_caption,'num_sent':len(allwd_friends),\\\n\t\t'num_unsent':len(disallwd_friends),'photo_url':photo_url,'photo_id':photo_id,'photo_owner_username':photo_owner_username})", "def test_share_inactive_user(self):\n george = self.george\n alva = self.alva\n john = self.john\n bikes = self.bikes\n\n self.assertEqual(\n bikes.raccess.get_effective_privilege(alva),\n PrivilegeCodes.NONE)\n\n # inactive users can't be granted access\n # set john to an inactive user\n john.is_active = False\n john.save()\n\n with self.assertRaises(PermissionDenied):\n george.uaccess.share_resource_with_user(\n bikes, john, PrivilegeCodes.CHANGE)\n\n john.is_active = True\n john.save()\n\n # inactive grantor can't grant access\n # let's first grant John access privilege\n george.uaccess.share_resource_with_user(\n bikes, john, PrivilegeCodes.CHANGE)\n\n self.assertEqual(\n bikes.raccess.get_effective_privilege(john),\n PrivilegeCodes.CHANGE)\n\n john.is_active = False\n john.save()\n\n with self.assertRaises(PermissionDenied):\n john.uaccess.share_resource_with_user(\n bikes, alva, PrivilegeCodes.VIEW)", "def every_existing_owner_should_have_valid_profile_image_link(context):\n items = context.response.json()['items']\n for item in items:\n owner = item['owner']\n if owner['user_type'] == 'does_not_exist':\n continue\n link = owner['profile_image']\n assert validators.url(link), (\n 'Owner %s (%d) in item %d has an invalid profile image link: %s'\n .format(owner['display_name'], owner['user_id'], link))\n logging.debug(\n 'Owner %s (%d) has a valid profile image link: %s',\n owner['display_name'], owner['user_id'], link)", "def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)", "def test_aws_service_api_private_image_get(self):\n pass", "def test_auth_public_owned(self):\n self.do_visible(True, 'pattieblack', True, tenant='pattieblack')", "def test_aws_service_api_private_images_get(self):\n pass", "def test_kyc_put_legal_share_holder(self):\n pass", "def test_un_logged_in_user_can_not_upload_picture(self):\n tmp_file = generate_image_for_testing()\n response = self.client.post(self.user_passport_url,\n data={'passport': tmp_file})\n\n self.assertEqual(403, response.status_code)", "def test_api_thumbnail_retrieve_by_consumer_site_any_role(self):\n consumer_site_access = ConsumerSiteAccessFactory(\n consumer_site=self.some_video.playlist.consumer_site,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n consumer_site_access.user, self.some_thumbnail\n )", "def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, \"User %s does not own study %d\" %\n (user.id, study.id))", "async def cog_check(self, 
ctx:utils.Context):\n\n if ctx.author.id in self.bot.config['owners']:\n return True\n raise commands.NotOwner", "def test_cant_create_image_unauthorized(self):\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (10, 10))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n res = self.client.post(IMAGE_URL, data={'image': ntf},\n format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_logged_in_not_friend(self):\n\n # log in\n self.make_logged_in_not_friend()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PUBLIC)", "def share_image(self):\n portal_state = getMultiAdapter((self.context, self.request), name=u'plone_portal_state')\n registry = queryUtility(IRegistry)\n settings = registry.forInterface(IFbShareSettings, check=False)\n\n if settings.content_use_own_image:\n # Stolen from collective.opengraph\n img_size = settings.content_image_size\n context = aq_inner(self.context)\n obj_url = context.absolute_url()\n if hasattr(context, 'getField'):\n field = self.context.getField('image')\n if not field and HAS_LEADIMAGE:\n field = context.getField(IMAGE_FIELD_NAME)\n \n if field and field.get_size(context) > 0:\n if img_size:\n return u'%s/%s_%s' % (obj_url, field.getName(), img_size)\n return u'%s/%s' % (obj_url, field.getName())\n \n return SiteOpenGraphMetaViewlet.share_image(self)", "def can_be_accessed(self, user):\n if self.shared_with_everyone:\n return True\n\n if self.user == user or self.users_allowed.filter(pk=user.pk).exists():\n return True\n\n for group in self.groups_allowed.all():\n if user.groups.filter(pk=group.pk).exists():\n return True\n\n return False", "def test_util_has_perm_or_owns_sanity(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n perm = 'forums_forum.thread_edit_forum'\n allowed = access.has_perm_or_owns(me, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = access.has_perm_or_owns(me, perm, other_t, self.forum_1)\n eq_(allowed, False)", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def test_auth_sharable(self):\n self.do_sharable(False, 'pattieblack', None, tenant='froggy')", "def test_no_images_unauthorized(self):\n res = self.client.get(IMAGE_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_new_share(self):\n \n test_user_with_checkpoint = self.create_saved_test_user_with_checkpoint()\n another_test_user_to_share = self.create_saved_test_user()\n \n data = {\"user_id\": 
test_user_with_checkpoint.user_obj.id,\n \"to_user_id\": another_test_user_to_share.user_obj.id,\n \"signature\": gen_signature(\"put\",\n \"share\",\n gen_api_key(test_user_with_checkpoint.user_obj.access_token, \n test_user_with_checkpoint.user_obj.id)),\n \"user_checkpoint_id\": test_user_with_checkpoint.user_checkpoint_obj.id\n }\n \n resp = self.client.put(\"/share/\", data=data)\n assert \"ok\" in resp.data\n assert not get_share_w_attr(test_user_with_checkpoint.user_obj, \n another_test_user_to_share.user_obj, \n test_user_with_checkpoint.user_checkpoint_obj) is None", "def require_share_exists(f):\n @wraps(f)\n def wrapper(context, share_id, *args, **kwargs):\n share_get(context, share_id)\n return f(context, share_id, *args, **kwargs)\n wrapper.__name__ = f.__name__\n return wrapper", "def test_kyc_post_legal_share_holder(self):\n pass", "def share(config: Config, ami: str, account: str) -> None:\n\n ec2_client = boto3.client(\"ec2\", region_name=config.get(\"region\", None))\n\n ec2_client.modify_image_attribute(\n ImageId=ami,\n LaunchPermission={\"Add\": [{\"UserId\": account}]},\n OperationType=\"add\",\n UserIds=[account],\n Value=\"string\",\n DryRun=False,\n )", "def test_album_image_user(self):\n self.assertEqual(self.album.user, self.photo.user)", "def check_is_admin(context):\n init()\n\n #the target is user-self\n credentials = context.to_dict()\n target = credentials\n\n return policy.check('context_is_admin', target, credentials)", "def test_api_thumbnail_retrieve_by_organization_student(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=STUDENT,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def test_should_render_for_owner_unpublished(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context(\n public=False)))", "def _allow_access(self, context, share, access, share_server=None):\n if access['access_type'] != 'ip':\n raise exception.InvalidShareAccess(\n _('Quobyte driver only supports ip access control'))\n\n volume_uuid = self._resolve_volume_name(share['name'],\n share['project_id'])\n ro = access['access_level'] == (constants.ACCESS_LEVEL_RO)\n call_params = {\n \"volume_uuid\": volume_uuid,\n \"read_only\": ro,\n \"add_allow_ip\": access['access_to']}\n self.rpc.call('exportVolume', call_params)", "def test_03_self_cannot_upgrade_resource(self):\n holes = self.holes\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.VIEW)\n self.assertFalse(dog in holes.raccess.edit_users)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_resource_with_user(\n holes, dog, PrivilegeCodes.VIEW)\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_resource_with_user(\n holes, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_share_no_os_environ(self):\n with self.sys_exit_patch:\n with self.assertRaises(SystemExit):\n self.inst.share(\n \"test-container\",\n \"test-recipient\",\n \"r\",\n \"w\"\n )\n self.sys_exit_mock.assert_called_once()", "def 
test_logged_in_contributor(self):\n self.make_logged_in_contributor()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PRIVATE)", "def test_auth_sharable_admin(self):\n self.do_sharable(True, 'pattieblack', None, tenant='froggy',\n is_admin=True)", "def create_image_allowed(self):\n return self._create_image_allowed", "def test_locked_asset_not_logged_in(self):\r\n self.client.logout()\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def test_api_thumbnail_retrieve_by_organization_administrator(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=ADMINISTRATOR,\n )\n\n self.assert_user_can_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def ft_syndicate_access():\n \n fake_user = FakeObject()\n fake_user.email = \"fakeuser@opencloud.us\"\n\n print \"\\nensure_user_exists(%s)\\n\" % fake_user.email\n ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )\n\n print \"\\nensure_user_exists(%s)\\n\" % fake_user.email\n ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )\n\n fake_volume = FakeObject()\n fake_volume.name = \"fakevolume\"\n fake_volume.description = \"This is a fake volume, created for funtional testing\"\n fake_volume.blocksize = 1024\n fake_volume.cap_read_data = True \n fake_volume.cap_write_data = True \n fake_volume.cap_host_data = False\n fake_volume.archive = False\n fake_volume.private = True\n \n # test idempotency\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n \n print \"\\nensure_volume_access_right_exists(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )\n \n print \"\\nensure_volume_access_right_exists(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )\n \n print \"\\nensure_volume_access_right_absent(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_absent( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_access_right_absent(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_absent( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_user_absent(%s)\\n\" % fake_user.email\n ensure_user_absent( fake_user.email )\n\n print \"\\nensure_user_absent(%s)\\n\" % fake_user.email\n ensure_user_absent( fake_user.email )\n \n \n \n \n print \"\\nensure_principal_exists(%s)\\n\" % fake_user.email\n ensure_principal_exists( fake_user.email, \"asdf\", is_admin=False, max_UGs=1100, max_RGs=1 )\n \n print 
\"\\nensure_principal_exists(%s)\\n\" % fake_user.email\n ensure_principal_exists( fake_user.email, \"asdf\", is_admin=False, max_UGs=1100, max_RGs=1 )\n\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n\n print \"\\nsetup_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name)\n setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, \"abcdef\" )\n \n print \"\\nsetup_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name)\n setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, \"abcdef\" )\n \n print \"\\nteardown_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name )\n teardown_volume_access( fake_user.email, fake_volume.name )\n \n print \"\\nteardown_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name )\n teardown_volume_access( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_principal_absent(%s)\\n\" % fake_user.email\n ensure_principal_absent( fake_user.email )", "def share_photo_in_personal_group(request):\n\tif request.method == \"POST\":\n\t\tuser_id = request.user.id\n\t\tdecision_made = request.POST.get(\"dm\",None)\n\t\tnew_title = request.POST.get(\"nt\",None)\n\t\tif new_title:\n\t\t\t# before processing, ensure this user's all photos aren't banned:\n\t\t\tphoto_owner_id = request.session.get(\"personal_group_shared_photo_owner_id\",None)\n\t\t\tbanned, time_remaining = check_photo_upload_ban(photo_owner_id)\n\t\t\tif banned:\n\t\t\t\treturn redirect(\"cant_share_photo\")\n\t\t\telif not banned:\n\t\t\t\tphoto_caption = request.session.get(\"personal_group_shared_photo_caption\",None)\n\t\t\t\tphoto_url = request.session.get(\"personal_group_shared_photo_url\",None)\n\t\t\t\tif request.POST.get('dec',None) == '1':\n\t\t\t\t\tform = PersonalGroupSharedPhotoCaptionForm(request.POST)\n\t\t\t\t\tif form.is_valid():\n\t\t\t\t\t\t# change photo caption\n\t\t\t\t\t\tphoto_owner_username = request.session.get(\"personal_group_shared_photo_owner_username\",None)\n\t\t\t\t\t\tgroup_ids = request.session.get(\"personal_group_shared_photo_group_ids\",None)\n\t\t\t\t\t\tphoto_id = request.session.get(\"personal_group_shared_photo_id\",None)\n\t\t\t\t\t\tnew_photo_caption = form.cleaned_data.get(\"text\")\n\t\t\t\t\t\tis_limited, cooloff_time = get_rate_limit_in_personal_group_sharing(user_id)\n\t\t\t\t\t\tif is_limited:\n\t\t\t\t\t\t\treturn redirect(\"cant_share_photo\",cooloff_time)\n\t\t\t\t\t\telif not is_limited:\n\t\t\t\t\t\t\tallwd_grps, disallwd_grps = post_shared_photo_to_personal_groups(group_ids,photo_url,new_photo_caption,photo_id,\\\n\t\t\t\t\t\t\t\tphoto_owner_username,user_id, photo_owner_id)\n\t\t\t\t\t\t\ttargeted_friends = request.session.get(\"personal_group_shared_photo_group_contents\",None)\n\t\t\t\t\t\t\tallwd_friends, disallwd_friends = [], []\n\t\t\t\t\t\t\tfor group_id in allwd_grps:\n\t\t\t\t\t\t\t\tallwd_friends.append(targeted_friends[group_id])\n\t\t\t\t\t\t\tfor group_id in disallwd_grps:\n\t\t\t\t\t\t\t\tdisallwd_friends.append(targeted_friends[group_id])\n\t\t\t\t\t\t\trequest.session[\"personal_group_shared_photo_caption\"] = new_photo_caption\n\t\t\t\t\t\t\trequest.session[\"personal_group_shared_photo_allwd_friends\"] = allwd_friends\n\t\t\t\t\t\t\trequest.session[\"personal_group_shared_photo_disallwd_friends\"] = disallwd_friends\n\t\t\t\t\t\t\trequest.session.modified = True\n\t\t\t\t\t\t\treturn 
redirect(\"photo_shared\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\treturn redirect(\"cant_share_photo\",cooloff_time)\n\t\t\t\t\telse:\n\t\t\t\t\t\t# validation error when trying to change photo caption\n\t\t\t\t\t\tcontext = {'photo_url':photo_url,'edit_caption':True,'photo_caption':photo_caption,'form':form}\n\t\t\t\t\t\treturn render(request,\"personal_group/sharing/share_photo_in_personal_group.html\",context)\n\t\t\t\telse:\n\t\t\t\t\t# user pressed 'skip' - i.e. no change in photo caption\n\t\t\t\t\tphoto_owner_username = request.session.get(\"personal_group_shared_photo_owner_username\",None)\n\t\t\t\t\tgroup_ids = request.session.get(\"personal_group_shared_photo_group_ids\",None)\n\t\t\t\t\tphoto_id = request.session.get(\"personal_group_shared_photo_id\",None)\n\t\t\t\t\tis_limited, cooloff_time = get_rate_limit_in_personal_group_sharing(user_id)\n\t\t\t\t\tif is_limited:\n\t\t\t\t\t\treturn redirect(\"cant_share_photo\",cooloff_time)\n\t\t\t\t\telif not is_limited:\n\t\t\t\t\t\tallwd_grps, disallwd_grps = post_shared_photo_to_personal_groups(group_ids,photo_url,photo_caption,photo_id,\\\n\t\t\t\t\t\t\tphoto_owner_username,user_id, photo_owner_id)\n\t\t\t\t\t\ttargeted_friends = request.session.get(\"personal_group_shared_photo_group_contents\",None)\n\t\t\t\t\t\tallwd_friends, disallwd_friends = [], []\n\t\t\t\t\t\tfor group_id in allwd_grps:\n\t\t\t\t\t\t\tallwd_friends.append(targeted_friends[group_id])\n\t\t\t\t\t\tfor group_id in disallwd_grps:\n\t\t\t\t\t\t\tdisallwd_friends.append(targeted_friends[group_id])\n\t\t\t\t\t\trequest.session[\"personal_group_shared_photo_allwd_friends\"] = allwd_friends\n\t\t\t\t\t\trequest.session[\"personal_group_shared_photo_disallwd_friends\"] = disallwd_friends\n\t\t\t\t\t\trequest.session.modified = True\n\t\t\t\t\t\treturn redirect(\"photo_shared\")\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn redirect(\"cant_share_photo\",cooloff_time)\n\t\t\telse:\n\t\t\t\treturn redirect(\"cant_share_photo\")\n\t\telif decision_made:\n\t\t\tgroups = request.POST.getlist('gid',None)# contains group_ids in list format\n\t\t\tphoto_url = request.session.get(\"personal_group_shared_photo_url\",None)\n\t\t\tphoto_caption = request.session.get(\"personal_group_shared_photo_caption\",None)\n\t\t\tif groups:\n\t\t\t\tif len(groups) > PHOTO_SHARING_FRIEND_LIMIT:\n\t\t\t\t\t# return to select friends screen, alongwith message asking user to select lesser friends\n\t\t\t\t\tcontext = {'must_select_less':True,'limit':PHOTO_SHARING_FRIEND_LIMIT,'photo_url':photo_url,'photo_caption':photo_caption}\n\t\t\t\t\tgroup_and_friend = get_user_friend_list(user_id)\n\t\t\t\t\tif not group_and_friend:\n\t\t\t\t\t\tcontext[\"no_friends\"] = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tcontext[\"friend_data\"] = group_and_friend\n\t\t\t\t\treturn render(request,\"personal_group/sharing/share_photo_in_personal_group.html\",context)\n\t\t\t\telse:\n\t\t\t\t\tgroup_ids, group_contents = [], {}\n\t\t\t\t\tfor group in groups:\n\t\t\t\t\t\tdata = group.split(\":\",4)\n\t\t\t\t\t\tgroup_id = data[0]\n\t\t\t\t\t\tgroup_ids.append(group_id)\n\t\t\t\t\t\tgroup_contents[group_id] = {'friend_uname':data[4],'friend_avurl':data[2],'friend_id':data[3],'is_anon':data[1]}\n\t\t\t\t\trequest.session[\"personal_group_shared_photo_group_ids\"] = group_ids\n\t\t\t\t\trequest.session[\"personal_group_shared_photo_group_contents\"] = group_contents\n\t\t\t\t\trequest.session.modified = True\n\t\t\t\t\tcontext = 
{'photo_url':photo_url,'edit_caption':True,'photo_caption':photo_caption,'form':PersonalGroupSharedPhotoCaptionForm()}\n\t\t\t\t\treturn render(request,\"personal_group/sharing/share_photo_in_personal_group.html\",context)\n\t\t\telse:\n\t\t\t\t# return to select friends screen, alongwith message asking user to at least select 1 friend\n\t\t\t\tcontext = {'must_select_one':True,'photo_url':photo_url,'photo_caption':photo_caption}\n\t\t\t\tgroup_and_friend = get_user_friend_list(user_id)\n\t\t\t\tif not group_and_friend:\n\t\t\t\t\tcontext[\"no_friends\"] = True\n\t\t\t\telse:\n\t\t\t\t\tcontext[\"friend_data\"] = group_and_friend\n\t\t\t\treturn render(request,\"personal_group/sharing/share_photo_in_personal_group.html\",context)\n\t\telse:\n\t\t\tpayload = request.POST.get(\"pl\").split(\":\",4)#maxsplit set to 4 to ensure caption containing ':' is not split\n\t\t\towner_username, photo_id, origin, owner_id, photo_caption = payload[0], payload[1], payload[2], payload[3], payload[4]\n\t\t\tphoto_url = request.POST.get(\"purl\")\n\t\t\trequest.session[\"personal_group_shared_photo_id\"] = photo_id\n\t\t\trequest.session[\"personal_group_shared_photo_url\"] = photo_url\n\t\t\trequest.session[\"personal_group_shared_photo_origin\"] = origin\n\t\t\trequest.session[\"personal_group_shared_photo_owner_id\"] = owner_id\n\t\t\trequest.session[\"personal_group_shared_photo_caption\"] = photo_caption\n\t\t\trequest.session[\"personal_group_shared_photo_owner_username\"] = owner_username\n\t\t\trequest.session.modified = True\n\t\t\tcontext = {'photo_url':photo_url,'photo_caption':photo_caption,'limit':PHOTO_SHARING_FRIEND_LIMIT,'origin':origin,'photo_id':photo_id,\\\n\t\t\t'owner_username':owner_username}\n\t\t\tif tutorial_unseen(user_id=user_id, which_tut='3', renew_lease=True):\n\t\t\t\tcontext[\"show_first_time_tutorial\"] = True\n\t\t\tgroup_and_friend = get_user_friend_list(user_id)\n\t\t\tif not group_and_friend:\n\t\t\t\tcontext[\"no_friends\"] = True\n\t\t\telse:\n\t\t\t\tcontext[\"friend_data\"] = group_and_friend\n\t\t\t\tcontext[\"num_friends\"] = len(group_and_friend)\n\t\t\treturn render(request,\"personal_group/sharing/share_photo_in_personal_group.html\",context)\n\telse:\n\t\treturn redirect(\"missing_page\")", "def userOwnsAsset(userId, assetId):\n url = f\"https://api.roblox.com/ownership/hasasset?assetId={assetId}&userId={userId}\"\n r = requests.get(url)\n if r.text == 'true':\n return True\n else:\n return False", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def shared_by(self, user):\n return Shares.objects.filter(recipe=self, chef=user).exists()", "def test_mount_status_nas_share_by_pool(self):\n pass", "def test_review_story_restrict_to_only_admin(self):\n self.client.post('/api/stories', headers={'token': user_token}, data=json.dumps(story1))\n res = self.client.put('/api/stories/1/review', headers={'token': user_token}, data=json.dumps({\n 'status': 'Approved'\n }))\n result = json.loads(res.data.decode())\n self.assertEqual(result['message'], 'Permission denied')\n self.assertEqual(res.status_code, 403)", "def test_permissions(self):\n taxonomy = self.get_taxonomy()\n return True if self.request.user == taxonomy.author else taxonomy.public", "def test_test_nas_share(self):\n pass", "def test_share_38(self):\n with self.os_environ_get_patch, \\\n self.patch_push_share, \\\n self.patch_subprocess_call, \\\n self.sys_version_38_patch:\n self.inst.share(\n \"test-container\",\n \"test-recipient\",\n \"r\",\n \"w\"\n )\n 
self.subprocess_call_mock.assert_called_once()\n self.push_share_mock.assert_awaited_once()", "def test_share_01(self, mocker):\n contributors = ['alice@gmail.com']\n g = GSheets(self.fake.file_path(depth=1, category=None, extension='json'), contributors)\n g.spreadsheet = Spreadsheet(None, None)\n g.spreadsheet.share = mocker.MagicMock()\n\n g.share()\n\n assert not g.spreadsheet.share.called", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def require_share_instance_exists(f):\n @wraps(f)\n def wrapper(context, share_instance_id, *args, **kwargs):\n share_instance_get(context, share_instance_id)\n return f(context, share_instance_id, *args, **kwargs)\n wrapper.__name__ = f.__name__\n return wrapper", "def test_should_render_for_owner(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context()))", "def testImageHandling(self):\n \n pm = getToolByName(self.portal, 'portal_membership')\n #make sure the person's member portrait isn't defined\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n # Delete the (nonexistant) image, make sure the portrait stays undefined\n self.person.setImage('DELETE_IMAGE')\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n self.person.setImage(TEST_GIF, content_type=\"image/gif\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_GIF)\n # Try to get a 10x10 version of the image\n imageOfSizeTag = self.person.getImageOfSize(10, 10)\n self.failUnlessEqual(imageOfSizeTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"10\" width=\"10\" />')\n self.failUnlessEqual(pm.getPersonalPortrait('abc123').__name__, 'abc123')\n \n # Try to get a scaled-by-ratio image with a width of 100.\n scaledImageTag = self.person.getScaledImageByWidth(100)\n self.failUnlessEqual(scaledImageTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"150\" width=\"100\" />')\n \n # Delete the image, make sure the portrait is deleted as well\n self.person.setImage('DELETE_IMAGE')\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n #self.person.setImage(TEST_JPEG, content_type=\"image/jpeg\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_JPEG)\n \n self.person.setImage(TEST_TIFF, content_type=\"image/tiff\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_TIFF)\n # Try to get a 10x10 version of the image\n imageOfSizeTag = self.person.getImageOfSize(10, 10)\n self.failUnlessEqual(imageOfSizeTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"10\" width=\"10\" />')\n \n # Try to get a scaled-by-ratio image with a width of 100.\n # TIFF handling in Plone is broken (probably the fault of PIL), handle the problem nicely.\n scaledImageTag = self.person.getScaledImageByWidth(100)\n 
self.failUnless(scaledImageTag == '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"150\" width=\"100\" />' or scaledImageTag == '')", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def permits(identity, obj, permission):\n return False", "async def permits(self, identity, permission, context=None):\r\n return self.authorized_userid(identity) is not None", "def test_profile_image_requested_field(self):\n user_2 = UserFactory.create(password=self.password)\n # Ensure that parental controls don't apply to this user\n user_2.profile.year_of_birth = 1970\n user_2.profile.save()\n source_threads = [\n self.create_source_thread(),\n self.create_source_thread({\"user_id\": str(user_2.id), \"username\": user_2.username}),\n ]\n\n self.register_get_user_response(self.user, upvoted_ids=[\"test_thread\"])\n self.register_get_threads_response(source_threads, page=1, num_pages=1)\n self.create_profile_image(self.user, get_profile_image_storage())\n self.create_profile_image(user_2, get_profile_image_storage())\n\n response = self.client.get(\n self.url,\n {\"course_id\": str(self.course.id), \"requested_fields\": \"profile_image\"},\n )\n assert response.status_code == 200\n response_threads = json.loads(response.content.decode('utf-8'))['results']\n\n for response_thread in response_threads:\n expected_profile_data = self.get_expected_user_profile(response_thread['author'])\n response_users = response_thread['users']\n assert expected_profile_data == response_users[response_thread['author']]", "def test_locked_asset_not_registered(self):\r\n self.client.login(username=self.usr, password=self.pwd)\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def test_set_share(self):\n self.app.post_json(url=\"/config/shares\",\n params=dict(\n source='gsiftp://source',\n destination='gsiftp://nowhere',\n vo='dteam',\n share=80\n ),\n status=200\n )", "async def test_edit_sharing_data(container_requester):\n async with container_requester as requester:\n await requester(\n 'POST', '/db/guillotina', data=json.dumps({\n '@type': 'Item',\n 'id': 'foobar1'\n }))\n await requester(\n 'POST', '/db/guillotina', data=json.dumps({\n '@type': 'Item',\n 'id': 'foobar2'\n }))\n response, _ = await requester(\n 'POST',\n '/db/guillotina/@batch',\n data=json.dumps([{\n 'method': 'POST',\n 'endpoint': 'foobar1/@sharing',\n 'payload': {\n \"prinperm\": [{\n \"principal\": \"user1\",\n \"permission\": \"guillotina.AccessContent\",\n \"setting\": \"AllowSingle\"\n }]\n }\n }, {\n 'method': 'POST',\n 'endpoint': 'foobar2/@sharing',\n 'payload': {\n \"prinperm\": [{\n \"principal\": \"user1\",\n \"permission\": \"guillotina.AccessContent\",\n \"setting\": \"AllowSingle\"\n }]\n }\n }])\n )\n response, _ = await requester(\n 'POST',\n '/db/guillotina/@batch',\n data=json.dumps([{\n 'method': 'GET',\n 'endpoint': 'foobar1/@sharing'\n }, {\n 'method': 'GET',\n 'endpoint': 'foobar2/@sharing'\n }])\n )\n assert len(response) == 2\n assert response[0]['body']['local']['prinperm']['user1']['guillotina.AccessContent'] == 'AllowSingle'\n assert response[1]['body']['local']['prinperm']['user1']['guillotina.AccessContent'] == 'AllowSingle'", "def test_upsert_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n 
template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def every_non_existing_owner_should_not_have_profile_image_link(context):\n items = context.response.json()['items']\n for item in items:\n owner = item['owner']\n if not owner['user_type'] == 'does_not_exist':\n continue\n assert 'profile_image' not in owner\n logging.debug(\n 'Not existing Owner %s does not have a valid profile image link',\n owner['display_name'])", "def test_should_render_for_owner(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context(\n can_edit_reviewrequest=False)))", "def validate_owner(model, request):\n auth_token = request.headers.get('Authentication-Token')\n user = _token_loader(auth_token)\n if model.owner != user:\n abort(401)", "def is_screenshare(self):\n return self._is_screenshare", "def test_put_unauthorized(self):\n\n url = reverse('file')\n\n data = {\n 'shard_id': self.shard1.id,\n 'link_id': \"b8866161-0b1f-4a8e-acde-07047313ec8f\",\n 'parent_datastore_id': str(self.test_datastore_obj.id),\n 'chunk_count': 1,\n 'size': 512,\n }\n\n self.client.force_authenticate(user=self.test_user2_obj)\n response = self.client.put(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def require_share_snapshot_exists(f):\n @wraps(f)\n def wrapper(context, share_snapshot_id, *args, **kwargs):\n share_snapshot_get(context, share_snapshot_id)\n return f(context, share_snapshot_id, *args, **kwargs)\n wrapper.__name__ = f.__name__\n return wrapper", "def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator", "def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator", "def share_image(self):\n portal_state = getMultiAdapter((self.context, self.request), name=u'plone_portal_state')\n \n registry = queryUtility(IRegistry)\n settings = registry.forInterface(IFbShareSettings, check=False)\n if settings.image_to_share==u'site_logo':\n portal = self.portal_state.portal()\n logoName = portal.restrictedTraverse('base_properties').logoName\n return \"%s/%s\" % (portal_state.portal_url(), logoName)\n \n share_image_view = getMultiAdapter((portal_state.portal(), self.request),\n name=u'collective.fbshare.default_image')\n if share_image_view.data():\n return \"%s/@@collective.fbshare.default_image\" % portal_state.portal_url()", "def test_owner_create_blogpost_as_other_user(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n another_user = UserFactory.create()\r\n app = AppFactory.create()\r\n blogpost = BlogpostFactory.build(app_id=app.id,\r\n owner=another_user)\r\n\r\n assert self.mock_authenticated.id == app.owner.id\r\n assert_raises(Forbidden, getattr(require, 'blogpost').create, blogpost)", "def test_profile_image_requested_field(self):\n source_comments = [self.create_source_comment()]\n self.register_get_thread_response({\n \"id\": self.thread_id,\n \"course_id\": str(self.course.id),\n \"thread_type\": \"discussion\",\n \"children\": source_comments,\n \"resp_total\": 100,\n })\n self.register_get_user_response(self.user, upvoted_ids=[\"test_comment\"])\n self.create_profile_image(self.user, get_profile_image_storage())\n\n response = self.client.get(self.url, {\"thread_id\": self.thread_id, \"requested_fields\": \"profile_image\"})\n assert response.status_code == 200\n response_comments = json.loads(response.content.decode('utf-8'))['results']\n for response_comment in response_comments:\n expected_profile_data = 
self.get_expected_user_profile(response_comment['author'])\n response_users = response_comment['users']\n assert expected_profile_data == response_users[response_comment['author']]", "def test_client_photo_view(self):\r\n response = self.test_client.get(reverse('client_photo'))\r\n self.assertEqual(response.status_code, 200)\r\n self.assertTrue(self.client1.get_client_photo_data()\r\n in response.context['clients'])" ]
[ "0.6706358", "0.6633116", "0.6622457", "0.66168797", "0.6576476", "0.6559533", "0.62385696", "0.6178727", "0.6154038", "0.60761106", "0.60420334", "0.59743327", "0.5963598", "0.58454823", "0.5845463", "0.5824839", "0.57912004", "0.5788092", "0.57464755", "0.5700856", "0.5699289", "0.5650332", "0.5588469", "0.55831426", "0.55749977", "0.55662173", "0.5548477", "0.55440956", "0.5515592", "0.55026627", "0.5492556", "0.5485021", "0.5475945", "0.54756504", "0.5473645", "0.5468482", "0.544419", "0.54369724", "0.54333574", "0.54119074", "0.53922933", "0.53863496", "0.5386295", "0.5381452", "0.5380921", "0.536819", "0.5353201", "0.53523487", "0.5342934", "0.532818", "0.532473", "0.5319526", "0.53116024", "0.5309566", "0.53066045", "0.5302029", "0.528342", "0.5280641", "0.5279725", "0.5279341", "0.5276463", "0.5263447", "0.5261233", "0.52602226", "0.52547747", "0.52461755", "0.5245025", "0.524485", "0.5242813", "0.5228746", "0.5223109", "0.52161914", "0.5215318", "0.52074003", "0.5206743", "0.5206728", "0.5205972", "0.5195471", "0.5181921", "0.517058", "0.51670533", "0.5166221", "0.51632845", "0.51572144", "0.5150671", "0.5145124", "0.51387393", "0.5138407", "0.5130721", "0.51248324", "0.51139534", "0.51106095", "0.5105523", "0.5102579", "0.51025397", "0.51025397", "0.51018775", "0.50974786", "0.50973934", "0.50945956" ]
0.6179839
7
Tests that an authenticated context (with is_admin set to False) cannot share an image it does not own even if it is shared with it, but with can_share = False.
def test_auth_sharable_cannot_share(self):
    self.do_sharable(False, 'pattieblack', FakeMembership(False),
                     tenant='froggy')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ensure_share(self, context, share, share_server=None):\n pass", "def ensure_share(self, context, share, share_server=None):\r\n LOG.debug(\"Ensure share.\")", "def cant_share_photo(request, ttl=None,*args, **kwargs):\n\tif ttl:\n\t\ttry:\n\t\t\tttl = int(ttl)\n\t\texcept ValueError:\n\t\t\tttl = None\n\tphoto_id = request.session.get(\"personal_group_shared_photo_id\",None)\n\torigin = request.session.get(\"personal_group_shared_photo_origin\",None)\n\tphoto_url = request.session.get(\"personal_group_shared_photo_url\",None)\n\tphoto_caption = request.session.get(\"personal_group_shared_photo_caption\",None)\n\tphoto_owner_username = request.session.get(\"personal_group_shared_photo_owner_username\",None)\n\treturn render(request,\"personal_group/sharing/photo_not_shared.html\",{'photo_caption':photo_caption,'photo_id':photo_id,'photo_url':photo_url,\\\n\t\t'photo_owner_username':photo_owner_username,'origin':origin,'ttl':ttl})", "def canShare(self):\n return False", "def test_auth_sharable_can_share(self):\n self.do_sharable(True, 'pattieblack', FakeMembership(True),\n tenant='froggy')", "def test_user_without_share(self):\n set_permission(Permission.EDIT, self.user1, self.collection)\n\n # Can not add permissions to users.\n data = {\"users\": {self.user2.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n # Can not add permissions to groups.\n data = {\"users\": {self.group.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def deny_access(self, context, share, access, share_server=None):\n self._get_helper(share).deny_access('/', share, access)", "def deny_access(self, context, share, access, share_server=None):\r\n LOG.debug(\"Deny access.\")\r\n self.helper._deny_access(share['name'], access, share['share_proto'])", "def test_un_logged_in_user_can_not_upload_picture(self):\n tmp_file = generate_image_for_testing()\n response = self.client.post(self.user_passport_url,\n data={'passport': tmp_file})\n\n self.assertEqual(403, response.status_code)", "def test_anon_shared(self):\n self.do_sharable(False, 'pattieblack', None)\n self.do_sharable(False, 'pattieblack', FakeMembership(True))", "def allow_access(self, context, share, access, share_server=None):\r\n LOG.debug(\"Allow access.\")\r\n self.helper._allow_access(share['name'], access, share['share_proto'])", "def test_empty_shared(self):\n self.do_sharable(False, 'pattieblack', None, is_admin=True)\n self.do_sharable(False, 'pattieblack', FakeMembership(True),\n is_admin=True)", "def test_auth_sharable_owned(self):\n self.do_sharable(True, 'pattieblack', None, tenant='pattieblack')", "def do_sharable(self, exp_res, img_owner, membership=None, **kwargs):\n\n img = FakeImage(img_owner, True)\n ctx = context.RequestContext(**kwargs)\n\n sharable_args = {}\n if membership is not None:\n sharable_args['membership'] = membership\n\n self.assertEqual(ctx.is_image_sharable(img, **sharable_args), exp_res)", "def allow_access(self, context, share, access, share_server=None):\n self._get_helper(share).allow_access('/', share, access)", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, 
self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def media_image_remotely_accessible(self) -> bool:\n return True", "def test_logged_in_not_friend(self):\n\n # log in\n self.make_logged_in_not_friend()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PUBLIC)", "def test_kyc_post_legal_share_holder(self):\n pass", "def test_dashboards_v2_share(self):\n pass", "def test_wrong_config_shares0(self):\n self.app.post_json(url=\"/config/shares\",\n params=dict(\n source='gsiftp://source',\n destination='gsiftp://nowhere',\n vo='dteam',\n share='dfdf'\n ),\n status=400\n )", "def test_protect_owner(self):\n self.collection.set_permission(Permission.SHARE, self.user1)\n\n # User with share permission cannot grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertNotIn(\"owner\", self.collection.get_permissions(self.user2))\n self.assertFalse(PermissionModel.objects.filter(user=self.user2).exists())\n\n # User with share permission cannot revoke ``owner`` permission\n self.collection.set_permission(Permission.OWNER, self.user2)\n data = {\"users\": {self.user2.pk: \"editor\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n self.collection.set_permission(Permission.NONE, self.user2)\n\n # Now let user1 be owner on collection.\n set_permission(Permission.OWNER, self.user1, self.collection)\n\n # ``owner`` permission cannot be assigned to a group\n data = {\"groups\": {self.group.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertFalse(PermissionModel.objects.filter(group=self.group).exists())\n\n # User with owner permission can grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n\n # User with owner permission can revoke ``owner`` permission\n data = {\"users\": {self.user2.pk: \"edit\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(\n PermissionModel.objects.filter(\n user=self.user2, value=Permission.OWNER.value\n ).exists()\n )\n\n # User with owner permission cannot remove all owners\n data = {\"users\": 
{self.user1.pk: \"edit\", self.owner.pk: \"edit\"}}\n\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(resp.data[\"detail\"], \"Object must have at least one owner.\")\n\n owner_permissions = self.collection.permission_group.permissions.filter(\n value=Permission.OWNER.value\n )\n owner_count = owner_permissions.count()\n self.assertEqual(owner_count, 2)\n\n # User can delete his owner permission if there is at least one other owner\n self.assertTrue(owner_permissions.filter(user=self.user1).exists())\n data = {\"users\": {self.user1.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(owner_permissions.filter(user=self.user1.pk).exists())", "def test_locked_asset_not_logged_in(self):\r\n self.client.logout()\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def test_no_images_unauthorized(self):\n res = self.client.get(IMAGE_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def share():\n return True", "def test_share(self):\n\n # In the actual test, we'll want to confirm that an IP address\n # can be shared to a group\n\n # Try to share with the group--fails for now (operation not\n # implemented in nova); note: change 1 to group, '10.0.0.1' to IP\n dtutil.assert_raises(novaclient.OpenStackException,\n self.server.share_ip, 1, '10.0.0.1', True)", "def test_01_self_unshare_resource(self):\n holes = self.holes\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in holes.raccess.edit_users)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))\n dog.uaccess.unshare_resource_with_user(holes, dog)\n self.assertFalse(dog in holes.raccess.edit_users)\n self.assertFalse(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [], dog.uaccess.get_resource_unshare_users(holes)))", "def test_auth_sharable(self):\n self.do_sharable(False, 'pattieblack', None, tenant='froggy')", "def test_locked_asset_not_registered(self):\r\n self.client.login(username=self.usr, password=self.pwd)\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def test_wrong_config_shares2(self):\n self.app.post_json(url=\"/config/shares\",\n params=dict(\n source='dfgsdfsg',\n destination='gsiftp://nowhere',\n vo='dteam',\n share=80\n ),\n status=400\n )\n\n self.app.post_json(url=\"/config/shares\",\n params=dict(\n source='gsiftp://source',\n destination='klhjkhjk',\n vo='dteam',\n share=80\n ),\n status=400\n )", "def test_kyc_put_legal_share_holder(self):\n pass", "def _deny_access(self, context, share, access, share_server=None):\n if access['access_type'] != 'ip':\n LOG.debug('Quobyte driver only supports ip access control. 
'\n 'Ignoring deny access call for %s , %s',\n share['name'],\n self._get_project_name(context, share['project_id']))\n return\n\n volume_uuid = self._resolve_volume_name(share['name'],\n share['project_id'])\n call_params = {\n \"volume_uuid\": volume_uuid,\n \"remove_allow_ip\": access['access_to']}\n self.rpc.call('exportVolume', call_params)", "def test_cant_create_image_unauthorized(self):\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (10, 10))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n res = self.client.post(IMAGE_URL, data={'image': ntf},\n format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def _share():\n context = get_factcheck_context()\n return make_response(render_template('share.html', **context))", "def test_public_user(self):\n set_permission(Permission.SHARE, self.user1, self.collection)\n\n data = {\"public\": \"view\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"none\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"edit\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"share\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"owner\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def cli(env, identifier, account_id):\n\n image_mgr = SoftLayer.ImageManager(env.client)\n image_id = helpers.resolve_id(image_mgr.resolve_ids, identifier, 'image')\n shared_image = image_mgr.share_image(image_id, account_id)\n\n if shared_image:\n env.fout(f\"Image template {identifier} was shared to account {account_id}.\")", "def require_share_exists(f):\n @wraps(f)\n def wrapper(context, share_id, *args, **kwargs):\n share_get(context, share_id)\n return f(context, share_id, *args, **kwargs)\n wrapper.__name__ = f.__name__\n return wrapper", "def test_share_inactive_user(self):\n george = self.george\n alva = self.alva\n john = self.john\n bikes = self.bikes\n\n self.assertEqual(\n bikes.raccess.get_effective_privilege(alva),\n PrivilegeCodes.NONE)\n\n # inactive users can't be granted access\n # set john to an inactive user\n john.is_active = False\n john.save()\n\n with self.assertRaises(PermissionDenied):\n george.uaccess.share_resource_with_user(\n bikes, john, PrivilegeCodes.CHANGE)\n\n john.is_active = True\n john.save()\n\n # inactive grantor can't grant access\n # let's first grant John access privilege\n george.uaccess.share_resource_with_user(\n bikes, john, PrivilegeCodes.CHANGE)\n\n self.assertEqual(\n bikes.raccess.get_effective_privilege(john),\n PrivilegeCodes.CHANGE)\n\n john.is_active = False\n john.save()\n\n with self.assertRaises(PermissionDenied):\n john.uaccess.share_resource_with_user(\n bikes, alva, PrivilegeCodes.VIEW)", "def test_detail_not_contributor_forbidden(self):\n self.login(self.user1)\n resp = self.client.get(self.get_url(self.c2.pk))\n self.assert403(resp)", "def photo_shared(request):\n\tphoto_id = request.session.get(\"personal_group_shared_photo_id\",None)\n\torigin = request.session.get(\"personal_group_shared_photo_origin\",None)\n\tphoto_url 
= request.session.get(\"personal_group_shared_photo_url\",None)\n\tphoto_caption = request.session.get(\"personal_group_shared_photo_caption\",None)\n\tallwd_friends = request.session.get(\"personal_group_shared_photo_allwd_friends\",None)\n\tdisallwd_friends = request.session.get(\"personal_group_shared_photo_disallwd_friends\",None)\n\tphoto_owner_username = request.session.get(\"personal_group_shared_photo_owner_username\",None)\n\treturn render(request,\"personal_group/sharing/photo_shared.html\",{'allwd_friends':allwd_friends,'disallwd_friends':disallwd_friends,\\\n\t\t'own_uname':retrieve_uname(request.user.id,decode=True),'origin':origin,'photo_caption':photo_caption,'num_sent':len(allwd_friends),\\\n\t\t'num_unsent':len(disallwd_friends),'photo_url':photo_url,'photo_id':photo_id,'photo_owner_username':photo_owner_username})", "def dnt_share_app():\r\n msg, status = \"\", True\r\n try:\r\n\r\n 'Click on Do not share button'\r\n flag1 = ui_controls.button(get_obj_identifier('a'))\r\n #flag2 = ui_controls.button(get_obj_identifier('share_dontShare_btn'))\r\n \r\n \r\n\r\n status = False if not(flag1) else True\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def test_logged_in_friend_not_in_group(self):\n\n self.make_logged_in_friend_not_in_group()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_ALLFRIENDS)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_ALLFRIENDS)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_ALLFRIENDS)", "def test_dashboards_v2_delete_share(self):\n pass", "def every_non_existing_owner_should_not_have_profile_image_link(context):\n items = context.response.json()['items']\n for item in items:\n owner = item['owner']\n if not owner['user_type'] == 'does_not_exist':\n continue\n assert 'profile_image' not in owner\n logging.debug(\n 'Not existing Owner %s does not have a valid profile image link',\n owner['display_name'])", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def testGetAccessDenied(self):\n self.runGet(None)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_util_has_perm_or_owns_sanity(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n perm = 'forums_forum.thread_edit_forum'\n allowed = access.has_perm_or_owns(me, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = access.has_perm_or_owns(me, perm, other_t, self.forum_1)\n eq_(allowed, False)", "def test_share_no_os_environ(self):\n with self.sys_exit_patch:\n with self.assertRaises(SystemExit):\n self.inst.share(\n \"test-container\",\n \"test-recipient\",\n \"r\",\n \"w\"\n )\n self.sys_exit_mock.assert_called_once()", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = 
self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def test_no_permission(self):\n override_acl(self.user, {'can_use_private_threads': 0})\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"t use private threads\", status_code=403)", "def test_should_render_for_owner_unpublished(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context(\n public=False)))", "def test_update_non_shareable(self):\n self.create_common_users_and_groups()\n sync = SyncUserAndGroups(\n tsurl=TS_URL,\n username=TS_USER,\n password=TS_PASSWORD,\n disable_ssl=True,\n )\n auag = sync.get_all_users_and_groups()\n\n # Need to remove the common users since they don't have emails.\n auag.remove_user(\"guest\")\n auag.remove_user(\"su\")\n auag.remove_user(\"system\")\n auag.remove_user(\"tsadmin\")\n\n # Change Group 1 and Group 2 and verify change took.\n group1 = auag.get_group(\"Group 1\")\n group1.visibility = Visibility.NON_SHAREABLE\n group2 = auag.get_group(\"Group 2\")\n group2.visibility = Visibility.DEFAULT\n\n # sync updates\n sync.sync_users_and_groups(users_and_groups=auag)\n\n # verify changes\n auag = sync.get_all_users_and_groups()\n self.assertEqual(\n auag.get_group(\"Group 1\").visibility, Visibility.NON_SHAREABLE\n )\n self.assertEqual(\n auag.get_group(\"Group 2\").visibility, Visibility.DEFAULT\n )\n self.assertEqual(\n auag.get_group('Group \"3\"').visibility, Visibility.NON_SHAREABLE\n )", "def test_no_permission(self):\n override_acl(self.user, {'can_use_private_threads': 0})\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"can't use private threads\", status_code=403)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_review_story_restrict_to_only_admin(self):\n self.client.post('/api/stories', headers={'token': user_token}, data=json.dumps(story1))\n res = self.client.put('/api/stories/1/review', headers={'token': user_token}, data=json.dumps({\n 'status': 'Approved'\n }))\n result = json.loads(res.data.decode())\n self.assertEqual(result['message'], 'Permission denied')\n self.assertEqual(res.status_code, 403)", "async def test_push_share_no_os_envars(self):\n with self.sys_exit_patch, \\\n self.patch_init_sharing_client_error, \\\n self.patch_get_address, \\\n self.os_environ_get_patch:\n with self.assertRaises(SystemExit):\n await self.inst._push_share(\n \"test-container\",\n \"test-recipient\",\n [\"r\", \"w\"]\n )", "def check_instance_shared_storage_local(self, context, instance):\n raise NotImplementedError()", "def testPostAccessDenied(self):\n self.runPost(None, data=self.post_data)\n self.response_401()\n for user in (self.guest, self.norole, self.unrelated_owner):\n self.runPost(user, data=self.post_data)\n self.response_403()", "def test_api_thumbnail_retrieve_by_consumer_site_any_role(self):\n consumer_site_access = 
ConsumerSiteAccessFactory(\n consumer_site=self.some_video.playlist.consumer_site,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n consumer_site_access.user, self.some_thumbnail\n )", "def noaccess(self):\n self.assertEqual(self.client.get(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.post(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.delete(self.ENDPOINT).status_code, 403)", "def test_auth_private_owned(self):\n self.do_visible(True, 'pattieblack', False, tenant='pattieblack')", "def test_not_logged_in(self):\n response = self.c.get(reverse(submit_point), {'lat': 34.0, 'lng': 45.3, 'zoom': 13})\n self.assertEqual(response.status_code, 403)", "def test_tenant_secret_page_on_other_site_domain_not_be_accessible(self):\n response = self.client.get(\n self.secret_url, HTTP_HOST=self.other_site.domain)\n self.assertEqual(response.status_code, 403)", "def share_photo_in_personal_group(request):\n\tif request.method == \"POST\":\n\t\tuser_id = request.user.id\n\t\tdecision_made = request.POST.get(\"dm\",None)\n\t\tnew_title = request.POST.get(\"nt\",None)\n\t\tif new_title:\n\t\t\t# before processing, ensure this user's all photos aren't banned:\n\t\t\tphoto_owner_id = request.session.get(\"personal_group_shared_photo_owner_id\",None)\n\t\t\tbanned, time_remaining = check_photo_upload_ban(photo_owner_id)\n\t\t\tif banned:\n\t\t\t\treturn redirect(\"cant_share_photo\")\n\t\t\telif not banned:\n\t\t\t\tphoto_caption = request.session.get(\"personal_group_shared_photo_caption\",None)\n\t\t\t\tphoto_url = request.session.get(\"personal_group_shared_photo_url\",None)\n\t\t\t\tif request.POST.get('dec',None) == '1':\n\t\t\t\t\tform = PersonalGroupSharedPhotoCaptionForm(request.POST)\n\t\t\t\t\tif form.is_valid():\n\t\t\t\t\t\t# change photo caption\n\t\t\t\t\t\tphoto_owner_username = request.session.get(\"personal_group_shared_photo_owner_username\",None)\n\t\t\t\t\t\tgroup_ids = request.session.get(\"personal_group_shared_photo_group_ids\",None)\n\t\t\t\t\t\tphoto_id = request.session.get(\"personal_group_shared_photo_id\",None)\n\t\t\t\t\t\tnew_photo_caption = form.cleaned_data.get(\"text\")\n\t\t\t\t\t\tis_limited, cooloff_time = get_rate_limit_in_personal_group_sharing(user_id)\n\t\t\t\t\t\tif is_limited:\n\t\t\t\t\t\t\treturn redirect(\"cant_share_photo\",cooloff_time)\n\t\t\t\t\t\telif not is_limited:\n\t\t\t\t\t\t\tallwd_grps, disallwd_grps = post_shared_photo_to_personal_groups(group_ids,photo_url,new_photo_caption,photo_id,\\\n\t\t\t\t\t\t\t\tphoto_owner_username,user_id, photo_owner_id)\n\t\t\t\t\t\t\ttargeted_friends = request.session.get(\"personal_group_shared_photo_group_contents\",None)\n\t\t\t\t\t\t\tallwd_friends, disallwd_friends = [], []\n\t\t\t\t\t\t\tfor group_id in allwd_grps:\n\t\t\t\t\t\t\t\tallwd_friends.append(targeted_friends[group_id])\n\t\t\t\t\t\t\tfor group_id in disallwd_grps:\n\t\t\t\t\t\t\t\tdisallwd_friends.append(targeted_friends[group_id])\n\t\t\t\t\t\t\trequest.session[\"personal_group_shared_photo_caption\"] = new_photo_caption\n\t\t\t\t\t\t\trequest.session[\"personal_group_shared_photo_allwd_friends\"] = allwd_friends\n\t\t\t\t\t\t\trequest.session[\"personal_group_shared_photo_disallwd_friends\"] = disallwd_friends\n\t\t\t\t\t\t\trequest.session.modified = True\n\t\t\t\t\t\t\treturn redirect(\"photo_shared\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\treturn redirect(\"cant_share_photo\",cooloff_time)\n\t\t\t\t\telse:\n\t\t\t\t\t\t# validation error when trying to change photo caption\n\t\t\t\t\t\tcontext = 
{'photo_url':photo_url,'edit_caption':True,'photo_caption':photo_caption,'form':form}\n\t\t\t\t\t\treturn render(request,\"personal_group/sharing/share_photo_in_personal_group.html\",context)\n\t\t\t\telse:\n\t\t\t\t\t# user pressed 'skip' - i.e. no change in photo caption\n\t\t\t\t\tphoto_owner_username = request.session.get(\"personal_group_shared_photo_owner_username\",None)\n\t\t\t\t\tgroup_ids = request.session.get(\"personal_group_shared_photo_group_ids\",None)\n\t\t\t\t\tphoto_id = request.session.get(\"personal_group_shared_photo_id\",None)\n\t\t\t\t\tis_limited, cooloff_time = get_rate_limit_in_personal_group_sharing(user_id)\n\t\t\t\t\tif is_limited:\n\t\t\t\t\t\treturn redirect(\"cant_share_photo\",cooloff_time)\n\t\t\t\t\telif not is_limited:\n\t\t\t\t\t\tallwd_grps, disallwd_grps = post_shared_photo_to_personal_groups(group_ids,photo_url,photo_caption,photo_id,\\\n\t\t\t\t\t\t\tphoto_owner_username,user_id, photo_owner_id)\n\t\t\t\t\t\ttargeted_friends = request.session.get(\"personal_group_shared_photo_group_contents\",None)\n\t\t\t\t\t\tallwd_friends, disallwd_friends = [], []\n\t\t\t\t\t\tfor group_id in allwd_grps:\n\t\t\t\t\t\t\tallwd_friends.append(targeted_friends[group_id])\n\t\t\t\t\t\tfor group_id in disallwd_grps:\n\t\t\t\t\t\t\tdisallwd_friends.append(targeted_friends[group_id])\n\t\t\t\t\t\trequest.session[\"personal_group_shared_photo_allwd_friends\"] = allwd_friends\n\t\t\t\t\t\trequest.session[\"personal_group_shared_photo_disallwd_friends\"] = disallwd_friends\n\t\t\t\t\t\trequest.session.modified = True\n\t\t\t\t\t\treturn redirect(\"photo_shared\")\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn redirect(\"cant_share_photo\",cooloff_time)\n\t\t\telse:\n\t\t\t\treturn redirect(\"cant_share_photo\")\n\t\telif decision_made:\n\t\t\tgroups = request.POST.getlist('gid',None)# contains group_ids in list format\n\t\t\tphoto_url = request.session.get(\"personal_group_shared_photo_url\",None)\n\t\t\tphoto_caption = request.session.get(\"personal_group_shared_photo_caption\",None)\n\t\t\tif groups:\n\t\t\t\tif len(groups) > PHOTO_SHARING_FRIEND_LIMIT:\n\t\t\t\t\t# return to select friends screen, alongwith message asking user to select lesser friends\n\t\t\t\t\tcontext = {'must_select_less':True,'limit':PHOTO_SHARING_FRIEND_LIMIT,'photo_url':photo_url,'photo_caption':photo_caption}\n\t\t\t\t\tgroup_and_friend = get_user_friend_list(user_id)\n\t\t\t\t\tif not group_and_friend:\n\t\t\t\t\t\tcontext[\"no_friends\"] = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tcontext[\"friend_data\"] = group_and_friend\n\t\t\t\t\treturn render(request,\"personal_group/sharing/share_photo_in_personal_group.html\",context)\n\t\t\t\telse:\n\t\t\t\t\tgroup_ids, group_contents = [], {}\n\t\t\t\t\tfor group in groups:\n\t\t\t\t\t\tdata = group.split(\":\",4)\n\t\t\t\t\t\tgroup_id = data[0]\n\t\t\t\t\t\tgroup_ids.append(group_id)\n\t\t\t\t\t\tgroup_contents[group_id] = {'friend_uname':data[4],'friend_avurl':data[2],'friend_id':data[3],'is_anon':data[1]}\n\t\t\t\t\trequest.session[\"personal_group_shared_photo_group_ids\"] = group_ids\n\t\t\t\t\trequest.session[\"personal_group_shared_photo_group_contents\"] = group_contents\n\t\t\t\t\trequest.session.modified = True\n\t\t\t\t\tcontext = {'photo_url':photo_url,'edit_caption':True,'photo_caption':photo_caption,'form':PersonalGroupSharedPhotoCaptionForm()}\n\t\t\t\t\treturn render(request,\"personal_group/sharing/share_photo_in_personal_group.html\",context)\n\t\t\telse:\n\t\t\t\t# return to select friends screen, alongwith message asking user to at least select 1 
friend\n\t\t\t\tcontext = {'must_select_one':True,'photo_url':photo_url,'photo_caption':photo_caption}\n\t\t\t\tgroup_and_friend = get_user_friend_list(user_id)\n\t\t\t\tif not group_and_friend:\n\t\t\t\t\tcontext[\"no_friends\"] = True\n\t\t\t\telse:\n\t\t\t\t\tcontext[\"friend_data\"] = group_and_friend\n\t\t\t\treturn render(request,\"personal_group/sharing/share_photo_in_personal_group.html\",context)\n\t\telse:\n\t\t\tpayload = request.POST.get(\"pl\").split(\":\",4)#maxsplit set to 4 to ensure caption containing ':' is not split\n\t\t\towner_username, photo_id, origin, owner_id, photo_caption = payload[0], payload[1], payload[2], payload[3], payload[4]\n\t\t\tphoto_url = request.POST.get(\"purl\")\n\t\t\trequest.session[\"personal_group_shared_photo_id\"] = photo_id\n\t\t\trequest.session[\"personal_group_shared_photo_url\"] = photo_url\n\t\t\trequest.session[\"personal_group_shared_photo_origin\"] = origin\n\t\t\trequest.session[\"personal_group_shared_photo_owner_id\"] = owner_id\n\t\t\trequest.session[\"personal_group_shared_photo_caption\"] = photo_caption\n\t\t\trequest.session[\"personal_group_shared_photo_owner_username\"] = owner_username\n\t\t\trequest.session.modified = True\n\t\t\tcontext = {'photo_url':photo_url,'photo_caption':photo_caption,'limit':PHOTO_SHARING_FRIEND_LIMIT,'origin':origin,'photo_id':photo_id,\\\n\t\t\t'owner_username':owner_username}\n\t\t\tif tutorial_unseen(user_id=user_id, which_tut='3', renew_lease=True):\n\t\t\t\tcontext[\"show_first_time_tutorial\"] = True\n\t\t\tgroup_and_friend = get_user_friend_list(user_id)\n\t\t\tif not group_and_friend:\n\t\t\t\tcontext[\"no_friends\"] = True\n\t\t\telse:\n\t\t\t\tcontext[\"friend_data\"] = group_and_friend\n\t\t\t\tcontext[\"num_friends\"] = len(group_and_friend)\n\t\t\treturn render(request,\"personal_group/sharing/share_photo_in_personal_group.html\",context)\n\telse:\n\t\treturn redirect(\"missing_page\")", "def share(self, request):\n try:\n article = self.get_object()\n except PermissionDenied as pd:\n return Response({'error': str(pd)})\n\n article.shared_by.add(request.user)\n return Response({'message': '\"{}\" is shared'.format(article.title)})", "def require_share_instance_exists(f):\n @wraps(f)\n def wrapper(context, share_instance_id, *args, **kwargs):\n share_instance_get(context, share_instance_id)\n return f(context, share_instance_id, *args, **kwargs)\n wrapper.__name__ = f.__name__\n return wrapper", "def test_03_self_cannot_upgrade_resource(self):\n holes = self.holes\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.VIEW)\n self.assertFalse(dog in holes.raccess.edit_users)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_resource_with_user(\n holes, dog, PrivilegeCodes.VIEW)\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_resource_with_user(\n holes, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))", "def test_unshare(self):\n\n # In the actual test, we'll want to confirm that a shared IP\n # address can be unshared from a group\n\n # Try to unshare from the group--fails for now (operation not\n # implemented in nova); note: change '10.0.0.1' to IP\n dtutil.assert_raises(novaclient.OpenStackException,\n self.server.unshare_ip, 
'10.0.0.1')", "def test_test_nas_share(self):\n pass", "def check_instance_shared_storage_remote(self, context, data):\n raise NotImplementedError()", "def deny_access(self, base, share, access):\n def cbk(ddict, edir, host):\n if edir not in ddict or host not in ddict[edir]:\n return True\n ddict[edir].remove(host)\n if not ddict[edir]:\n ddict.pop(edir)\n self._manage_access(share['name'], access['access_type'],\n access['access_to'], cbk)", "def protect_share_item(portal_item):\r\n # protect portal item from deletion\r\n portal_item.protect(enable=True)\r\n print('protecting portal item of', portal_item.title, 'from deletion')\r\n\r\n # share portal item in the organization\r\n portal_item.share('org')\r\n print('sharing', portal_item.title, 'in organization')", "def test_get_other_users_template_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )", "def test_04_self_unshare_group(self):\n meowers = self.meowers\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))\n dog.uaccess.unshare_group_with_user(meowers, dog)\n self.assertFalse(dog in meowers.gaccess.edit_users)\n self.assertFalse(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [], dog.uaccess.get_group_unshare_users(meowers)))", "def test_user_not_in_group_cannot_access(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url)", "def test_unshare_template_registration(self):\n pass", "def test_image_no_login(self):\n\n result = self.client.get(\"/select_image\", follow_redirects=True)\n\n self.assertIn(b\"Password\", result.data)", "def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_auth_sharable_admin(self):\n self.do_sharable(True, 'pattieblack', None, tenant='froggy',\n is_admin=True)", "def test_read_unauthorized(self):\n\n url = reverse('file', kwargs={'file_id': str(self.file.id)})\n\n data = {}\n\n self.client.force_authenticate(user=self.test_user2_obj)\n response = self.client.get(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_unauthenticated_user_liking(self):\n self.like_dislike(self.dislike_url(5))", "def _allow_access(self, context, share, access, share_server=None):\n if access['access_type'] != 'ip':\n raise exception.InvalidShareAccess(\n _('Quobyte driver only supports ip access control'))\n\n volume_uuid = self._resolve_volume_name(share['name'],\n share['project_id'])\n ro = access['access_level'] == (constants.ACCESS_LEVEL_RO)\n call_params = {\n \"volume_uuid\": volume_uuid,\n \"read_only\": ro,\n \"add_allow_ip\": access['access_to']}\n self.rpc.call('exportVolume', call_params)", "def test_aws_service_api_private_images_get(self):\n pass", "def test_kyc_get_legal_share_holders(self):\n pass", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.vendor_id)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def 
multi_share_enabled(self) -> bool:\n return pulumi.get(self, \"multi_share_enabled\")", "def test_permissions(self, course_dir, groupshared):\n run_nbgrader([\"db\", \"assignment\", \"add\", \"ps1\"])\n run_nbgrader([\"db\", \"student\", \"add\", \"foo\"])\n with open(\"nbgrader_config.py\", \"a\") as fh:\n if groupshared:\n fh.write(\"\"\"c.CourseDirectory.groupshared = True\\n\"\"\")\n self._empty_notebook(join(course_dir, \"source\", \"ps1\", \"foo.ipynb\"))\n run_nbgrader([\"generate_assignment\", \"ps1\"])\n\n self._empty_notebook(join(course_dir, \"submitted\", \"foo\", \"ps1\", \"foo.ipynb\"))\n run_nbgrader([\"autograde\", \"ps1\"])\n run_nbgrader([\"generate_feedback\", \"ps1\"])\n\n if not groupshared:\n if sys.platform == 'win32':\n perms = '666'\n else:\n perms = '644'\n else:\n if sys.platform == 'win32':\n perms = '666'\n dirperms = '777'\n else:\n perms = '664'\n dirperms = '2775'\n\n assert isfile(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"foo.html\"))\n if groupshared:\n # non-groupshared doesn't guarantee anything about directory perms\n assert self._get_permissions(join(course_dir, \"feedback\", \"foo\", \"ps1\")) == dirperms\n assert self._get_permissions(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"foo.html\")) == perms", "def test_test_result_nas_share(self):\n pass", "def test_image_fail(self):\n client = Client()\n review1_id = Review.objects.get(content='TEST_CONTENT').id\n review2_id = Review.objects.get(content='TEST_CONTENT2').id\n review3_id = Review.objects.get(content='TEST_CONTENT3').id\n review4_id = Review.objects.get(content='TEST_CONTENT4').id\n no_review_id = review1_id + review2_id + review3_id + review4_id\n\n img_and_file = make_image_file()\n response = client.post('/api/review/'+str(review1_id)+'/image/',\n data={'image': img_and_file[1]})\n self.assertEqual(response.status_code, 401)\n client.login(username='TEST_USER_2',\n email='TEST_EMAIL_2', password='TEST_PW_2')\n response = client.post('/api/review/'+str(review1_id)+'/image/',\n data={'image': img_and_file[1]})\n self.assertEqual(response.status_code, 403)\n response = client.post('/api/review/'+str(no_review_id)+'/image/',\n data={'image': img_and_file[1]})\n self.assertEqual(response.status_code, 404)\n client.login(username='TEST_USER_1',\n email='TEST_EMAIL_1', password='TEST_PW_1')\n response = client.get('/api/review/'+str(review1_id)+'/image/')\n self.assertEqual(response.status_code, 405)\n response = client.post('/api/review/'+str(review1_id)+'/image/',\n data={'image': img_and_file[0].tobytes()})\n self.assertEqual(response.status_code, 400)", "def test_unauthenticated(self):\n self.logout_user()\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"sign in to use private threads\", status_code=403)", "def test_show_nas_share(self):\n pass", "def test_set_share(self):\n self.app.post_json(url=\"/config/shares\",\n params=dict(\n source='gsiftp://source',\n destination='gsiftp://nowhere',\n vo='dteam',\n share=80\n ),\n status=200\n )", "def test_new_share(self):\n \n test_user_with_checkpoint = self.create_saved_test_user_with_checkpoint()\n another_test_user_to_share = self.create_saved_test_user()\n \n data = {\"user_id\": test_user_with_checkpoint.user_obj.id,\n \"to_user_id\": another_test_user_to_share.user_obj.id,\n \"signature\": gen_signature(\"put\",\n \"share\",\n gen_api_key(test_user_with_checkpoint.user_obj.access_token, \n test_user_with_checkpoint.user_obj.id)),\n \"user_checkpoint_id\": 
test_user_with_checkpoint.user_checkpoint_obj.id\n }\n \n resp = self.client.put(\"/share/\", data=data)\n assert \"ok\" in resp.data\n assert not get_share_w_attr(test_user_with_checkpoint.user_obj, \n another_test_user_to_share.user_obj, \n test_user_with_checkpoint.user_checkpoint_obj) is None", "def test_remove_share(self):\n self.app.delete(url=\"/config/shares?share=80&destination=gsiftp://nowhere&vo=dteam\", status=400)\n self.app.delete(url=\"/config/shares?share=80&destination=gsiftp://nowhere&vo=dteam&source=gsiftp://source\", status=204)", "def require_share_snapshot_exists(f):\n @wraps(f)\n def wrapper(context, share_snapshot_id, *args, **kwargs):\n share_snapshot_get(context, share_snapshot_id)\n return f(context, share_snapshot_id, *args, **kwargs)\n wrapper.__name__ = f.__name__\n return wrapper", "def test_dashboards_v2_list_shares(self):\n pass", "def test_unauthenticated_user_disliking(self):\n self.like_dislike(self.like_url(6))" ]
[ "0.6938848", "0.6857029", "0.68210304", "0.67752564", "0.6616296", "0.63722545", "0.6287799", "0.6185038", "0.61454177", "0.6070145", "0.6063103", "0.6063056", "0.59651726", "0.5954177", "0.5950448", "0.58537835", "0.5789933", "0.57651097", "0.5743968", "0.57354796", "0.5734228", "0.57323635", "0.5713446", "0.5712914", "0.57039493", "0.5692112", "0.5690987", "0.56862795", "0.56805223", "0.5660907", "0.56291157", "0.5623598", "0.56064993", "0.5602019", "0.55749017", "0.5568093", "0.55544037", "0.5553796", "0.5540172", "0.5521483", "0.54953283", "0.5493679", "0.54822004", "0.5463779", "0.5443239", "0.5430932", "0.5409159", "0.5402438", "0.5385317", "0.5374435", "0.53660494", "0.53589183", "0.53582555", "0.535748", "0.5349455", "0.5342982", "0.5342982", "0.5334204", "0.5329109", "0.53283566", "0.5315263", "0.5311186", "0.5298197", "0.5296413", "0.5284724", "0.52713954", "0.5268774", "0.5268248", "0.5266153", "0.52577674", "0.5254776", "0.5246709", "0.5241411", "0.5238004", "0.5235753", "0.5228594", "0.522495", "0.5224417", "0.5213871", "0.52127767", "0.5205126", "0.5197244", "0.51834834", "0.51760083", "0.51717156", "0.5171173", "0.5170514", "0.51657313", "0.5164393", "0.5160091", "0.5152992", "0.51516885", "0.5143926", "0.5142924", "0.5138285", "0.5128755", "0.51268756", "0.5124765", "0.5120667", "0.5114161" ]
0.74366206
0
Tests that an authenticated context (with is_admin set to False) can share an image it does not own if it is shared with it, with can_share = True.
def test_auth_sharable_can_share(self):
    self.do_sharable(True, 'pattieblack', FakeMembership(True),
                     tenant='froggy')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ensure_share(self, context, share, share_server=None):\n pass", "def canShare(self):\n return False", "def ensure_share(self, context, share, share_server=None):\r\n LOG.debug(\"Ensure share.\")", "def test_auth_sharable_cannot_share(self):\n self.do_sharable(False, 'pattieblack', FakeMembership(False),\n tenant='froggy')", "def cant_share_photo(request, ttl=None,*args, **kwargs):\n\tif ttl:\n\t\ttry:\n\t\t\tttl = int(ttl)\n\t\texcept ValueError:\n\t\t\tttl = None\n\tphoto_id = request.session.get(\"personal_group_shared_photo_id\",None)\n\torigin = request.session.get(\"personal_group_shared_photo_origin\",None)\n\tphoto_url = request.session.get(\"personal_group_shared_photo_url\",None)\n\tphoto_caption = request.session.get(\"personal_group_shared_photo_caption\",None)\n\tphoto_owner_username = request.session.get(\"personal_group_shared_photo_owner_username\",None)\n\treturn render(request,\"personal_group/sharing/photo_not_shared.html\",{'photo_caption':photo_caption,'photo_id':photo_id,'photo_url':photo_url,\\\n\t\t'photo_owner_username':photo_owner_username,'origin':origin,'ttl':ttl})", "def allow_access(self, context, share, access, share_server=None):\r\n LOG.debug(\"Allow access.\")\r\n self.helper._allow_access(share['name'], access, share['share_proto'])", "def allow_access(self, context, share, access, share_server=None):\n self._get_helper(share).allow_access('/', share, access)", "def do_sharable(self, exp_res, img_owner, membership=None, **kwargs):\n\n img = FakeImage(img_owner, True)\n ctx = context.RequestContext(**kwargs)\n\n sharable_args = {}\n if membership is not None:\n sharable_args['membership'] = membership\n\n self.assertEqual(ctx.is_image_sharable(img, **sharable_args), exp_res)", "def test_user_without_share(self):\n set_permission(Permission.EDIT, self.user1, self.collection)\n\n # Can not add permissions to users.\n data = {\"users\": {self.user2.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n # Can not add permissions to groups.\n data = {\"users\": {self.group.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def share():\n return True", "def test_anon_shared(self):\n self.do_sharable(False, 'pattieblack', None)\n self.do_sharable(False, 'pattieblack', FakeMembership(True))", "def cli(env, identifier, account_id):\n\n image_mgr = SoftLayer.ImageManager(env.client)\n image_id = helpers.resolve_id(image_mgr.resolve_ids, identifier, 'image')\n shared_image = image_mgr.share_image(image_id, account_id)\n\n if shared_image:\n env.fout(f\"Image template {identifier} was shared to account {account_id}.\")", "def test_empty_shared(self):\n self.do_sharable(False, 'pattieblack', None, is_admin=True)\n self.do_sharable(False, 'pattieblack', FakeMembership(True),\n is_admin=True)", "def test_auth_sharable_owned(self):\n self.do_sharable(True, 'pattieblack', None, tenant='pattieblack')", "def test_dashboards_v2_share(self):\n pass", "def deny_access(self, context, share, access, share_server=None):\n self._get_helper(share).deny_access('/', share, access)", "def test_public_user(self):\n set_permission(Permission.SHARE, self.user1, self.collection)\n\n data = {\"public\": \"view\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"none\"}\n resp = 
self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"edit\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"share\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"owner\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def test_share(self):\n\n # In the actual test, we'll want to confirm that an IP address\n # can be shared to a group\n\n # Try to share with the group--fails for now (operation not\n # implemented in nova); note: change 1 to group, '10.0.0.1' to IP\n dtutil.assert_raises(novaclient.OpenStackException,\n self.server.share_ip, 1, '10.0.0.1', True)", "def media_image_remotely_accessible(self) -> bool:\n return True", "def _share():\n context = get_factcheck_context()\n return make_response(render_template('share.html', **context))", "def deny_access(self, context, share, access, share_server=None):\r\n LOG.debug(\"Deny access.\")\r\n self.helper._deny_access(share['name'], access, share['share_proto'])", "def require_share_exists(f):\n @wraps(f)\n def wrapper(context, share_id, *args, **kwargs):\n share_get(context, share_id)\n return f(context, share_id, *args, **kwargs)\n wrapper.__name__ = f.__name__\n return wrapper", "def photo_shared(request):\n\tphoto_id = request.session.get(\"personal_group_shared_photo_id\",None)\n\torigin = request.session.get(\"personal_group_shared_photo_origin\",None)\n\tphoto_url = request.session.get(\"personal_group_shared_photo_url\",None)\n\tphoto_caption = request.session.get(\"personal_group_shared_photo_caption\",None)\n\tallwd_friends = request.session.get(\"personal_group_shared_photo_allwd_friends\",None)\n\tdisallwd_friends = request.session.get(\"personal_group_shared_photo_disallwd_friends\",None)\n\tphoto_owner_username = request.session.get(\"personal_group_shared_photo_owner_username\",None)\n\treturn render(request,\"personal_group/sharing/photo_shared.html\",{'allwd_friends':allwd_friends,'disallwd_friends':disallwd_friends,\\\n\t\t'own_uname':retrieve_uname(request.user.id,decode=True),'origin':origin,'photo_caption':photo_caption,'num_sent':len(allwd_friends),\\\n\t\t'num_unsent':len(disallwd_friends),'photo_url':photo_url,'photo_id':photo_id,'photo_owner_username':photo_owner_username})", "def test_set_share(self):\n self.app.post_json(url=\"/config/shares\",\n params=dict(\n source='gsiftp://source',\n destination='gsiftp://nowhere',\n vo='dteam',\n share=80\n ),\n status=200\n )", "def test_protect_owner(self):\n self.collection.set_permission(Permission.SHARE, self.user1)\n\n # User with share permission cannot grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertNotIn(\"owner\", self.collection.get_permissions(self.user2))\n self.assertFalse(PermissionModel.objects.filter(user=self.user2).exists())\n\n # User with share permission cannot revoke ``owner`` permission\n self.collection.set_permission(Permission.OWNER, self.user2)\n data = {\"users\": {self.user2.pk: \"editor\"}}\n resp = self._detail_permissions(self.collection.pk, data, 
self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n self.collection.set_permission(Permission.NONE, self.user2)\n\n # Now let user1 be owner on collection.\n set_permission(Permission.OWNER, self.user1, self.collection)\n\n # ``owner`` permission cannot be assigned to a group\n data = {\"groups\": {self.group.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertFalse(PermissionModel.objects.filter(group=self.group).exists())\n\n # User with owner permission can grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n\n # User with owner permission can revoke ``owner`` permission\n data = {\"users\": {self.user2.pk: \"edit\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(\n PermissionModel.objects.filter(\n user=self.user2, value=Permission.OWNER.value\n ).exists()\n )\n\n # User with owner permission cannot remove all owners\n data = {\"users\": {self.user1.pk: \"edit\", self.owner.pk: \"edit\"}}\n\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(resp.data[\"detail\"], \"Object must have at least one owner.\")\n\n owner_permissions = self.collection.permission_group.permissions.filter(\n value=Permission.OWNER.value\n )\n owner_count = owner_permissions.count()\n self.assertEqual(owner_count, 2)\n\n # User can delete his owner permission if there is at least one other owner\n self.assertTrue(owner_permissions.filter(user=self.user1).exists())\n data = {\"users\": {self.user1.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(owner_permissions.filter(user=self.user1.pk).exists())", "def share(self, request):\n try:\n article = self.get_object()\n except PermissionDenied as pd:\n return Response({'error': str(pd)})\n\n article.shared_by.add(request.user)\n return Response({'message': '\"{}\" is shared'.format(article.title)})", "def test_wrong_config_shares0(self):\n self.app.post_json(url=\"/config/shares\",\n params=dict(\n source='gsiftp://source',\n destination='gsiftp://nowhere',\n vo='dteam',\n share='dfdf'\n ),\n status=400\n )", "def multi_share_enabled(self) -> bool:\n return pulumi.get(self, \"multi_share_enabled\")", "def is_screenshare(self):\n return self._is_screenshare", "def test_wrong_config_shares2(self):\n self.app.post_json(url=\"/config/shares\",\n params=dict(\n source='dfgsdfsg',\n destination='gsiftp://nowhere',\n vo='dteam',\n share=80\n ),\n status=400\n )\n\n self.app.post_json(url=\"/config/shares\",\n params=dict(\n source='gsiftp://source',\n destination='klhjkhjk',\n vo='dteam',\n share=80\n ),\n status=400\n )", "def test_test_nas_share(self):\n pass", "def test_01_self_unshare_resource(self):\n holes = self.holes\n cat = self.cat\n dog = self.dog\n 
cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in holes.raccess.edit_users)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))\n dog.uaccess.unshare_resource_with_user(holes, dog)\n self.assertFalse(dog in holes.raccess.edit_users)\n self.assertFalse(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [], dog.uaccess.get_resource_unshare_users(holes)))", "def test_auth_sharable(self):\n self.do_sharable(False, 'pattieblack', None, tenant='froggy')", "def test_kyc_post_legal_share_holder(self):\n pass", "def test_show_nas_share(self):\n pass", "def check_instance_shared_storage_remote(self, context, data):\n raise NotImplementedError()", "def require_share_instance_exists(f):\n @wraps(f)\n def wrapper(context, share_instance_id, *args, **kwargs):\n share_instance_get(context, share_instance_id)\n return f(context, share_instance_id, *args, **kwargs)\n wrapper.__name__ = f.__name__\n return wrapper", "def test_new_share(self):\n \n test_user_with_checkpoint = self.create_saved_test_user_with_checkpoint()\n another_test_user_to_share = self.create_saved_test_user()\n \n data = {\"user_id\": test_user_with_checkpoint.user_obj.id,\n \"to_user_id\": another_test_user_to_share.user_obj.id,\n \"signature\": gen_signature(\"put\",\n \"share\",\n gen_api_key(test_user_with_checkpoint.user_obj.access_token, \n test_user_with_checkpoint.user_obj.id)),\n \"user_checkpoint_id\": test_user_with_checkpoint.user_checkpoint_obj.id\n }\n \n resp = self.client.put(\"/share/\", data=data)\n assert \"ok\" in resp.data\n assert not get_share_w_attr(test_user_with_checkpoint.user_obj, \n another_test_user_to_share.user_obj, \n test_user_with_checkpoint.user_checkpoint_obj) is None", "def check_instance_shared_storage_local(self, context, instance):\n raise NotImplementedError()", "def dnt_share_app():\r\n msg, status = \"\", True\r\n try:\r\n\r\n 'Click on Do not share button'\r\n flag1 = ui_controls.button(get_obj_identifier('a'))\r\n #flag2 = ui_controls.button(get_obj_identifier('share_dontShare_btn'))\r\n \r\n \r\n\r\n status = False if not(flag1) else True\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def _allow_access(self, context, share, access, share_server=None):\n if access['access_type'] != 'ip':\n raise exception.InvalidShareAccess(\n _('Quobyte driver only supports ip access control'))\n\n volume_uuid = self._resolve_volume_name(share['name'],\n share['project_id'])\n ro = access['access_level'] == (constants.ACCESS_LEVEL_RO)\n call_params = {\n \"volume_uuid\": volume_uuid,\n \"read_only\": ro,\n \"add_allow_ip\": access['access_to']}\n self.rpc.call('exportVolume', call_params)", "def test_logged_in_not_friend(self):\n\n # log in\n self.make_logged_in_not_friend()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PUBLIC)", "def test_show_nas_share_by_nas(self):\n pass", "def 
test_test_result_nas_share(self):\n pass", "def test_share_01(self, mocker):\n contributors = ['alice@gmail.com']\n g = GSheets(self.fake.file_path(depth=1, category=None, extension='json'), contributors)\n g.spreadsheet = Spreadsheet(None, None)\n g.spreadsheet.share = mocker.MagicMock()\n\n g.share()\n\n assert not g.spreadsheet.share.called", "def test_kyc_put_legal_share_holder(self):\n pass", "def test_un_logged_in_user_can_not_upload_picture(self):\n tmp_file = generate_image_for_testing()\n response = self.client.post(self.user_passport_url,\n data={'passport': tmp_file})\n\n self.assertEqual(403, response.status_code)", "def test_dashboards_v2_list_shares(self):\n pass", "def protect_share_item(portal_item):\r\n # protect portal item from deletion\r\n portal_item.protect(enable=True)\r\n print('protecting portal item of', portal_item.title, 'from deletion')\r\n\r\n # share portal item in the organization\r\n portal_item.share('org')\r\n print('sharing', portal_item.title, 'in organization')", "def test_share_38(self):\n with self.os_environ_get_patch, \\\n self.patch_push_share, \\\n self.patch_subprocess_call, \\\n self.sys_version_38_patch:\n self.inst.share(\n \"test-container\",\n \"test-recipient\",\n \"r\",\n \"w\"\n )\n self.subprocess_call_mock.assert_called_once()\n self.push_share_mock.assert_awaited_once()", "def share_image(self):\n portal_state = getMultiAdapter((self.context, self.request), name=u'plone_portal_state')\n registry = queryUtility(IRegistry)\n settings = registry.forInterface(IFbShareSettings, check=False)\n\n if settings.content_use_own_image:\n # Stolen from collective.opengraph\n img_size = settings.content_image_size\n context = aq_inner(self.context)\n obj_url = context.absolute_url()\n if hasattr(context, 'getField'):\n field = self.context.getField('image')\n if not field and HAS_LEADIMAGE:\n field = context.getField(IMAGE_FIELD_NAME)\n \n if field and field.get_size(context) > 0:\n if img_size:\n return u'%s/%s_%s' % (obj_url, field.getName(), img_size)\n return u'%s/%s' % (obj_url, field.getName())\n \n return SiteOpenGraphMetaViewlet.share_image(self)", "def share_image(self):\n portal_state = getMultiAdapter((self.context, self.request), name=u'plone_portal_state')\n \n registry = queryUtility(IRegistry)\n settings = registry.forInterface(IFbShareSettings, check=False)\n if settings.image_to_share==u'site_logo':\n portal = self.portal_state.portal()\n logoName = portal.restrictedTraverse('base_properties').logoName\n return \"%s/%s\" % (portal_state.portal_url(), logoName)\n \n share_image_view = getMultiAdapter((portal_state.portal(), self.request),\n name=u'collective.fbshare.default_image')\n if share_image_view.data():\n return \"%s/@@collective.fbshare.default_image\" % portal_state.portal_url()", "def share_photo_in_personal_group(request):\n\tif request.method == \"POST\":\n\t\tuser_id = request.user.id\n\t\tdecision_made = request.POST.get(\"dm\",None)\n\t\tnew_title = request.POST.get(\"nt\",None)\n\t\tif new_title:\n\t\t\t# before processing, ensure this user's all photos aren't banned:\n\t\t\tphoto_owner_id = request.session.get(\"personal_group_shared_photo_owner_id\",None)\n\t\t\tbanned, time_remaining = check_photo_upload_ban(photo_owner_id)\n\t\t\tif banned:\n\t\t\t\treturn redirect(\"cant_share_photo\")\n\t\t\telif not banned:\n\t\t\t\tphoto_caption = request.session.get(\"personal_group_shared_photo_caption\",None)\n\t\t\t\tphoto_url = request.session.get(\"personal_group_shared_photo_url\",None)\n\t\t\t\tif 
request.POST.get('dec',None) == '1':\n\t\t\t\t\tform = PersonalGroupSharedPhotoCaptionForm(request.POST)\n\t\t\t\t\tif form.is_valid():\n\t\t\t\t\t\t# change photo caption\n\t\t\t\t\t\tphoto_owner_username = request.session.get(\"personal_group_shared_photo_owner_username\",None)\n\t\t\t\t\t\tgroup_ids = request.session.get(\"personal_group_shared_photo_group_ids\",None)\n\t\t\t\t\t\tphoto_id = request.session.get(\"personal_group_shared_photo_id\",None)\n\t\t\t\t\t\tnew_photo_caption = form.cleaned_data.get(\"text\")\n\t\t\t\t\t\tis_limited, cooloff_time = get_rate_limit_in_personal_group_sharing(user_id)\n\t\t\t\t\t\tif is_limited:\n\t\t\t\t\t\t\treturn redirect(\"cant_share_photo\",cooloff_time)\n\t\t\t\t\t\telif not is_limited:\n\t\t\t\t\t\t\tallwd_grps, disallwd_grps = post_shared_photo_to_personal_groups(group_ids,photo_url,new_photo_caption,photo_id,\\\n\t\t\t\t\t\t\t\tphoto_owner_username,user_id, photo_owner_id)\n\t\t\t\t\t\t\ttargeted_friends = request.session.get(\"personal_group_shared_photo_group_contents\",None)\n\t\t\t\t\t\t\tallwd_friends, disallwd_friends = [], []\n\t\t\t\t\t\t\tfor group_id in allwd_grps:\n\t\t\t\t\t\t\t\tallwd_friends.append(targeted_friends[group_id])\n\t\t\t\t\t\t\tfor group_id in disallwd_grps:\n\t\t\t\t\t\t\t\tdisallwd_friends.append(targeted_friends[group_id])\n\t\t\t\t\t\t\trequest.session[\"personal_group_shared_photo_caption\"] = new_photo_caption\n\t\t\t\t\t\t\trequest.session[\"personal_group_shared_photo_allwd_friends\"] = allwd_friends\n\t\t\t\t\t\t\trequest.session[\"personal_group_shared_photo_disallwd_friends\"] = disallwd_friends\n\t\t\t\t\t\t\trequest.session.modified = True\n\t\t\t\t\t\t\treturn redirect(\"photo_shared\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\treturn redirect(\"cant_share_photo\",cooloff_time)\n\t\t\t\t\telse:\n\t\t\t\t\t\t# validation error when trying to change photo caption\n\t\t\t\t\t\tcontext = {'photo_url':photo_url,'edit_caption':True,'photo_caption':photo_caption,'form':form}\n\t\t\t\t\t\treturn render(request,\"personal_group/sharing/share_photo_in_personal_group.html\",context)\n\t\t\t\telse:\n\t\t\t\t\t# user pressed 'skip' - i.e. 
no change in photo caption\n\t\t\t\t\tphoto_owner_username = request.session.get(\"personal_group_shared_photo_owner_username\",None)\n\t\t\t\t\tgroup_ids = request.session.get(\"personal_group_shared_photo_group_ids\",None)\n\t\t\t\t\tphoto_id = request.session.get(\"personal_group_shared_photo_id\",None)\n\t\t\t\t\tis_limited, cooloff_time = get_rate_limit_in_personal_group_sharing(user_id)\n\t\t\t\t\tif is_limited:\n\t\t\t\t\t\treturn redirect(\"cant_share_photo\",cooloff_time)\n\t\t\t\t\telif not is_limited:\n\t\t\t\t\t\tallwd_grps, disallwd_grps = post_shared_photo_to_personal_groups(group_ids,photo_url,photo_caption,photo_id,\\\n\t\t\t\t\t\t\tphoto_owner_username,user_id, photo_owner_id)\n\t\t\t\t\t\ttargeted_friends = request.session.get(\"personal_group_shared_photo_group_contents\",None)\n\t\t\t\t\t\tallwd_friends, disallwd_friends = [], []\n\t\t\t\t\t\tfor group_id in allwd_grps:\n\t\t\t\t\t\t\tallwd_friends.append(targeted_friends[group_id])\n\t\t\t\t\t\tfor group_id in disallwd_grps:\n\t\t\t\t\t\t\tdisallwd_friends.append(targeted_friends[group_id])\n\t\t\t\t\t\trequest.session[\"personal_group_shared_photo_allwd_friends\"] = allwd_friends\n\t\t\t\t\t\trequest.session[\"personal_group_shared_photo_disallwd_friends\"] = disallwd_friends\n\t\t\t\t\t\trequest.session.modified = True\n\t\t\t\t\t\treturn redirect(\"photo_shared\")\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn redirect(\"cant_share_photo\",cooloff_time)\n\t\t\telse:\n\t\t\t\treturn redirect(\"cant_share_photo\")\n\t\telif decision_made:\n\t\t\tgroups = request.POST.getlist('gid',None)# contains group_ids in list format\n\t\t\tphoto_url = request.session.get(\"personal_group_shared_photo_url\",None)\n\t\t\tphoto_caption = request.session.get(\"personal_group_shared_photo_caption\",None)\n\t\t\tif groups:\n\t\t\t\tif len(groups) > PHOTO_SHARING_FRIEND_LIMIT:\n\t\t\t\t\t# return to select friends screen, alongwith message asking user to select lesser friends\n\t\t\t\t\tcontext = {'must_select_less':True,'limit':PHOTO_SHARING_FRIEND_LIMIT,'photo_url':photo_url,'photo_caption':photo_caption}\n\t\t\t\t\tgroup_and_friend = get_user_friend_list(user_id)\n\t\t\t\t\tif not group_and_friend:\n\t\t\t\t\t\tcontext[\"no_friends\"] = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tcontext[\"friend_data\"] = group_and_friend\n\t\t\t\t\treturn render(request,\"personal_group/sharing/share_photo_in_personal_group.html\",context)\n\t\t\t\telse:\n\t\t\t\t\tgroup_ids, group_contents = [], {}\n\t\t\t\t\tfor group in groups:\n\t\t\t\t\t\tdata = group.split(\":\",4)\n\t\t\t\t\t\tgroup_id = data[0]\n\t\t\t\t\t\tgroup_ids.append(group_id)\n\t\t\t\t\t\tgroup_contents[group_id] = {'friend_uname':data[4],'friend_avurl':data[2],'friend_id':data[3],'is_anon':data[1]}\n\t\t\t\t\trequest.session[\"personal_group_shared_photo_group_ids\"] = group_ids\n\t\t\t\t\trequest.session[\"personal_group_shared_photo_group_contents\"] = group_contents\n\t\t\t\t\trequest.session.modified = True\n\t\t\t\t\tcontext = {'photo_url':photo_url,'edit_caption':True,'photo_caption':photo_caption,'form':PersonalGroupSharedPhotoCaptionForm()}\n\t\t\t\t\treturn render(request,\"personal_group/sharing/share_photo_in_personal_group.html\",context)\n\t\t\telse:\n\t\t\t\t# return to select friends screen, alongwith message asking user to at least select 1 friend\n\t\t\t\tcontext = {'must_select_one':True,'photo_url':photo_url,'photo_caption':photo_caption}\n\t\t\t\tgroup_and_friend = get_user_friend_list(user_id)\n\t\t\t\tif not group_and_friend:\n\t\t\t\t\tcontext[\"no_friends\"] = 
True\n\t\t\t\telse:\n\t\t\t\t\tcontext[\"friend_data\"] = group_and_friend\n\t\t\t\treturn render(request,\"personal_group/sharing/share_photo_in_personal_group.html\",context)\n\t\telse:\n\t\t\tpayload = request.POST.get(\"pl\").split(\":\",4)#maxsplit set to 4 to ensure caption containing ':' is not split\n\t\t\towner_username, photo_id, origin, owner_id, photo_caption = payload[0], payload[1], payload[2], payload[3], payload[4]\n\t\t\tphoto_url = request.POST.get(\"purl\")\n\t\t\trequest.session[\"personal_group_shared_photo_id\"] = photo_id\n\t\t\trequest.session[\"personal_group_shared_photo_url\"] = photo_url\n\t\t\trequest.session[\"personal_group_shared_photo_origin\"] = origin\n\t\t\trequest.session[\"personal_group_shared_photo_owner_id\"] = owner_id\n\t\t\trequest.session[\"personal_group_shared_photo_caption\"] = photo_caption\n\t\t\trequest.session[\"personal_group_shared_photo_owner_username\"] = owner_username\n\t\t\trequest.session.modified = True\n\t\t\tcontext = {'photo_url':photo_url,'photo_caption':photo_caption,'limit':PHOTO_SHARING_FRIEND_LIMIT,'origin':origin,'photo_id':photo_id,\\\n\t\t\t'owner_username':owner_username}\n\t\t\tif tutorial_unseen(user_id=user_id, which_tut='3', renew_lease=True):\n\t\t\t\tcontext[\"show_first_time_tutorial\"] = True\n\t\t\tgroup_and_friend = get_user_friend_list(user_id)\n\t\t\tif not group_and_friend:\n\t\t\t\tcontext[\"no_friends\"] = True\n\t\t\telse:\n\t\t\t\tcontext[\"friend_data\"] = group_and_friend\n\t\t\t\tcontext[\"num_friends\"] = len(group_and_friend)\n\t\t\treturn render(request,\"personal_group/sharing/share_photo_in_personal_group.html\",context)\n\telse:\n\t\treturn redirect(\"missing_page\")", "def ensure_share(self, context, share, share_server=None):\n\n volume_uuid = self._resolve_volume_name(share['name'],\n share['project_id'])\n\n LOG.debug(\"Ensuring Quobyte share %s\", share['name'])\n\n if not volume_uuid:\n raise (exception.ShareResourceNotFound(\n share_id=share['id']))\n\n result = self.rpc.call('exportVolume', dict(\n volume_uuid=volume_uuid,\n protocol='NFS'))\n\n return self._build_share_export_string(result)", "def test_dashboards_v2_delete_share(self):\n pass", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def get_share(self, activity_user_id, activity_id, share_id):\n return None", "def require_share_snapshot_exists(f):\n @wraps(f)\n def wrapper(context, share_snapshot_id, *args, **kwargs):\n share_snapshot_get(context, share_snapshot_id)\n return f(context, share_snapshot_id, *args, **kwargs)\n wrapper.__name__ = f.__name__\n return wrapper", "def test_update_non_shareable(self):\n self.create_common_users_and_groups()\n sync = SyncUserAndGroups(\n tsurl=TS_URL,\n username=TS_USER,\n password=TS_PASSWORD,\n disable_ssl=True,\n )\n auag = sync.get_all_users_and_groups()\n\n # Need to remove the common users since they don't have emails.\n auag.remove_user(\"guest\")\n auag.remove_user(\"su\")\n auag.remove_user(\"system\")\n auag.remove_user(\"tsadmin\")\n\n # Change Group 1 and Group 
2 and verify change took.\n group1 = auag.get_group(\"Group 1\")\n group1.visibility = Visibility.NON_SHAREABLE\n group2 = auag.get_group(\"Group 2\")\n group2.visibility = Visibility.DEFAULT\n\n # sync updates\n sync.sync_users_and_groups(users_and_groups=auag)\n\n # verify changes\n auag = sync.get_all_users_and_groups()\n self.assertEqual(\n auag.get_group(\"Group 1\").visibility, Visibility.NON_SHAREABLE\n )\n self.assertEqual(\n auag.get_group(\"Group 2\").visibility, Visibility.DEFAULT\n )\n self.assertEqual(\n auag.get_group('Group \"3\"').visibility, Visibility.NON_SHAREABLE\n )", "def do_visible(self, exp_res, img_owner, img_public, **kwargs):\n\n img = FakeImage(img_owner, img_public)\n ctx = context.RequestContext(**kwargs)\n\n self.assertEqual(ctx.is_image_visible(img), exp_res)", "def test_share_no_os_environ(self):\n with self.sys_exit_patch:\n with self.assertRaises(SystemExit):\n self.inst.share(\n \"test-container\",\n \"test-recipient\",\n \"r\",\n \"w\"\n )\n self.sys_exit_mock.assert_called_once()", "def test_api_thumbnail_retrieve_by_consumer_site_any_role(self):\n consumer_site_access = ConsumerSiteAccessFactory(\n consumer_site=self.some_video.playlist.consumer_site,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n consumer_site_access.user, self.some_thumbnail\n )", "def _deny_access(self, context, share, access, share_server=None):\n if access['access_type'] != 'ip':\n LOG.debug('Quobyte driver only supports ip access control. '\n 'Ignoring deny access call for %s , %s',\n share['name'],\n self._get_project_name(context, share['project_id']))\n return\n\n volume_uuid = self._resolve_volume_name(share['name'],\n share['project_id'])\n call_params = {\n \"volume_uuid\": volume_uuid,\n \"remove_allow_ip\": access['access_to']}\n self.rpc.call('exportVolume', call_params)", "def test_share_inactive_user(self):\n george = self.george\n alva = self.alva\n john = self.john\n bikes = self.bikes\n\n self.assertEqual(\n bikes.raccess.get_effective_privilege(alva),\n PrivilegeCodes.NONE)\n\n # inactive users can't be granted access\n # set john to an inactive user\n john.is_active = False\n john.save()\n\n with self.assertRaises(PermissionDenied):\n george.uaccess.share_resource_with_user(\n bikes, john, PrivilegeCodes.CHANGE)\n\n john.is_active = True\n john.save()\n\n # inactive grantor can't grant access\n # let's first grant John access privilege\n george.uaccess.share_resource_with_user(\n bikes, john, PrivilegeCodes.CHANGE)\n\n self.assertEqual(\n bikes.raccess.get_effective_privilege(john),\n PrivilegeCodes.CHANGE)\n\n john.is_active = False\n john.save()\n\n with self.assertRaises(PermissionDenied):\n john.uaccess.share_resource_with_user(\n bikes, alva, PrivilegeCodes.VIEW)", "def test_util_has_perm_or_owns_sanity(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n perm = 'forums_forum.thread_edit_forum'\n allowed = access.has_perm_or_owns(me, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = access.has_perm_or_owns(me, perm, other_t, self.forum_1)\n eq_(allowed, False)", "async def test_push_share_no_os_envars(self):\n with self.sys_exit_patch, \\\n self.patch_init_sharing_client_error, \\\n self.patch_get_address, \\\n self.os_environ_get_patch:\n with self.assertRaises(SystemExit):\n await self.inst._push_share(\n \"test-container\",\n \"test-recipient\",\n [\"r\", \"w\"]\n )", "def test_mount_status_nas_share_by_pool(self):\n pass", "def 
testing_post_with_share_demographic_scopes(self):\n # create a user\n self._create_user('anna', '123456')\n\n # create an application and add some extra capabilities\n application = self._create_application(\n 'an app', grant_type=Application.GRANT_AUTHORIZATION_CODE,\n redirect_uris='http://example.it')\n\n # Give the app some additional scopes.\n capability_a = self._create_capability('Capability A', [])\n capability_b = self._create_capability('Capability B', [])\n application.scope.add(capability_a, capability_b)\n\n # user logs in\n request = HttpRequest()\n self.client.login(request=request, username='anna', password='123456')\n\n # Loop through test cases in dictionary\n cases = VIEW_OAUTH2_SCOPES_TEST_CASES\n for case in cases:\n # Setup request parameters for test case\n request_bene_share_demographic_scopes = cases[case][\"request_bene_share_demographic_scopes\"]\n request_app_requires_demographic = cases[case][\"request_app_requires_demographic\"]\n request_scopes = cases[case][\"request_scopes\"]\n\n # Setup expected results for test case\n result_has_error = cases[case][\"result_has_error\"]\n result_raises_exception = cases[case].get(\"result_raises_exception\", None)\n result_exception_mesg = cases[case].get(\"result_exception_mesg\", None)\n result_token_scopes_granted = cases[case].get(\"result_token_scopes_granted\", None)\n result_access_token_count = cases[case].get(\"result_access_token_count\", None)\n result_refresh_token_count = cases[case].get(\"result_refresh_token_count\", None)\n result_archived_token_count = cases[case].get(\"result_archived_token_count\", None)\n result_archived_data_access_grant_count = cases[case].get(\"result_archived_data_access_grant_count\", None)\n\n payload = {\n 'client_id': application.client_id,\n 'response_type': 'code',\n 'redirect_uri': 'http://example.it',\n 'expires_in': 86400,\n 'allow': True,\n }\n\n # Does the application choose to require demographic info?\n application.require_demographic_scopes = request_app_requires_demographic\n application.save()\n\n # Does the beneficiary choose to block demographic info?\n if request_bene_share_demographic_scopes is not None:\n payload['share_demographic_scopes'] = request_bene_share_demographic_scopes\n\n # Scopes to be requested in the authorization request\n if request_scopes is not None:\n payload['scope'] = ' '.join(request_scopes)\n\n # Perform authorization request\n if result_has_error:\n # Expecting an error with request\n with self.assertRaisesRegexp(result_raises_exception, result_exception_mesg):\n response = self._authorize_and_request_token(payload, application)\n # Continue to next test case\n continue\n else:\n # Expecting no errors with request\n response = self._authorize_and_request_token(payload, application)\n\n # Assert auth request was successful\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content.decode(\"utf-8\"))\n\n # Test scope in response content\n self.assertEqual(sorted(content['scope'].split()), sorted(result_token_scopes_granted))\n\n # Test scope in access_token\n at = AccessToken.objects.get(token=content['access_token'])\n scopes_granted_access_token = sorted(at.scope.split())\n self.assertEqual(scopes_granted_access_token, sorted(result_token_scopes_granted))\n\n # Verify token counts expected.\n if result_access_token_count:\n self.assertEqual(AccessToken.objects.count(), result_access_token_count)\n if result_refresh_token_count:\n self.assertEqual(RefreshToken.objects.count(), result_refresh_token_count)\n if 
result_archived_token_count:\n self.assertEqual(ArchivedToken.objects.count(), result_archived_token_count)\n if result_archived_data_access_grant_count:\n self.assertEqual(ArchivedDataAccessGrant.objects.count(), result_archived_data_access_grant_count)\n # Verify DataAccessGrant count is always = 1\n if result_archived_data_access_grant_count:\n self.assertEqual(DataAccessGrant.objects.count(), 1)\n\n # Test end points with APIClient\n # Test that resource end points are limited by scopes\n # Loop through all scope paths.\n for scope in SCOPES_TO_URL_BASE_PATH:\n base_path = SCOPES_TO_URL_BASE_PATH[scope][\"base_path\"]\n is_fhir_url = SCOPES_TO_URL_BASE_PATH[scope][\"is_fhir_url\"]\n test_readview = SCOPES_TO_URL_BASE_PATH[scope].get(\"test_readview\", False)\n\n # Setup token in APIClient\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION=\"Bearer \" + at.token)\n\n # Mock back end FHIR resource calls\n with HTTMock(self.fhir_request_patient_readview_success_mock,\n self.fhir_request_patient_searchview_success_mock,\n self.fhir_request_eob_success_mock,\n self.fhir_request_coverage_success_mock):\n\n # Is this a FHIR type URL?\n if is_fhir_url:\n # Test SearchView for base path\n response = client.get(base_path)\n content = json.loads(response.content)\n self._assertScopeResponse(scope, scopes_granted_access_token, response, content)\n\n # Test Searchiew for base path with ending \"/\"\n response = client.get(base_path + \"/\")\n content = json.loads(response.content)\n self._assertScopeResponse(scope, scopes_granted_access_token, response, content)\n\n # Test ReadView for base path with FHIR_ID\n if test_readview:\n response = client.get(base_path + \"/\" + settings.DEFAULT_SAMPLE_FHIR_ID)\n content = json.loads(response.content)\n self._assertScopeResponse(scope, scopes_granted_access_token, response, content)\n\n # Test SearchView for base path with FHIR_ID\n response = client.get(base_path + \"?\" + settings.DEFAULT_SAMPLE_FHIR_ID)\n content = json.loads(response.content)\n self._assertScopeResponse(scope, scopes_granted_access_token, response, content)\n else:\n # Test base path.\n response = client.get(base_path)\n content = json.loads(response.content)\n self._assertScopeResponse(scope, scopes_granted_access_token, response, content)", "def test_no_images_unauthorized(self):\n res = self.client.get(IMAGE_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_get_owner_image(self):\n\n # user1 is owner of image_id 1\n # user2 is owner of image ids (2,3)\n for image_id in range(1, 4):\n url = reverse(self.url_name_one, args=(image_id,))\n if image_id == 1:\n self.client.force_authenticate(self.user1)\n else:\n self.client.force_authenticate(self.user2)\n\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n if image_id == 1:\n self.assertEqual(response.data[\"owner\"], \"user1\")\n else:\n self.assertEqual(response.data[\"owner\"], \"user2\")\n\n # user2 try to get image_id 1 which is owner user1\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_create_nas_share_by_nas(self):\n pass", "def is_shared(self):\n return self._tag == 'shared'", "def test_auth_private_owned(self):\n self.do_visible(True, 'pattieblack', False, tenant='pattieblack')", "def test_show_nas_share_by_pool(self):\n pass", "def share(config: Config, ami: str, account: str) -> None:\n\n 
ec2_client = boto3.client(\"ec2\", region_name=config.get(\"region\", None))\n\n ec2_client.modify_image_attribute(\n ImageId=ami,\n LaunchPermission={\"Add\": [{\"UserId\": account}]},\n OperationType=\"add\",\n UserIds=[account],\n Value=\"string\",\n DryRun=False,\n )", "def is_screenshare(self, is_screenshare):\n \n self._is_screenshare = is_screenshare", "def test_permissions(self, course_dir, groupshared):\n run_nbgrader([\"db\", \"assignment\", \"add\", \"ps1\"])\n run_nbgrader([\"db\", \"student\", \"add\", \"foo\"])\n with open(\"nbgrader_config.py\", \"a\") as fh:\n if groupshared:\n fh.write(\"\"\"c.CourseDirectory.groupshared = True\\n\"\"\")\n self._empty_notebook(join(course_dir, \"source\", \"ps1\", \"foo.ipynb\"))\n run_nbgrader([\"generate_assignment\", \"ps1\"])\n\n self._empty_notebook(join(course_dir, \"submitted\", \"foo\", \"ps1\", \"foo.ipynb\"))\n run_nbgrader([\"autograde\", \"ps1\"])\n run_nbgrader([\"generate_feedback\", \"ps1\"])\n\n if not groupshared:\n if sys.platform == 'win32':\n perms = '666'\n else:\n perms = '644'\n else:\n if sys.platform == 'win32':\n perms = '666'\n dirperms = '777'\n else:\n perms = '664'\n dirperms = '2775'\n\n assert isfile(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"foo.html\"))\n if groupshared:\n # non-groupshared doesn't guarantee anything about directory perms\n assert self._get_permissions(join(course_dir, \"feedback\", \"foo\", \"ps1\")) == dirperms\n assert self._get_permissions(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"foo.html\")) == perms", "def test_add_asset_share_feed(self):\n pass", "def can_be_accessed(self, user):\n if self.shared_with_everyone:\n return True\n\n if self.user == user or self.users_allowed.filter(pk=user.pk).exists():\n return True\n\n for group in self.groups_allowed.all():\n if user.groups.filter(pk=group.pk).exists():\n return True\n\n return False", "def test_mount_status_nas_share_by_nas(self):\n pass", "def test_auth_sharable_admin(self):\n self.do_sharable(True, 'pattieblack', None, tenant='froggy',\n is_admin=True)", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def shared_by(self, user):\n return Shares.objects.filter(recipe=self, chef=user).exists()", "def check_and_apply_sharing(json_object, metadata_type=None, omit=[], verbose=False):\n # Build the userGroups sharing object to apply to (mostly) all metadata\n userGroup_sharing_object = dict()\n for ug_uid in userGroups_uids:\n if userGroups_codes[ug_uid] in metadata_default_user_group_sharing:\n userGroup_sharing_object[ug_uid] = {\n \"access\": metadata_default_user_group_sharing[userGroups_codes[ug_uid]],\n \"id\": ug_uid\n }\n\n # Public Access for this metadata_type\n publicAccess = 'r-------' # default\n if metadata_type == 'categoryOptions':\n publicAccess = 'r-rw----'\n elif metadata_type == 'dataSets':\n publicAccess = 'r-r-----'\n elif metadata_type == 'dashboards':\n publicAccess = '--------'\n\n if isinstance(json_object, list):\n for i in range(0, len(json_object)):\n item = json_object[i]\n if 'name' in item:\n name = item['name']\n else:\n name = \"\"\n uid = item['id']\n # Remove publicAccess, it is just an old placeholder, this goes now in sharing\n if 'publicAccess' in item:\n item.pop('publicAccess', None)\n # Remove users\n if 'user' in item:\n item.pop('user', None)\n # Remove users from userGroups\n if 'users' in item:\n item['users'] = []\n # Remove userGroupAccesses, this goes now in sharing\n if 'userGroupAccesses' in item:\n 
item['userGroupAccesses'] = []\n\n if 'sharing' in item and 'sharing' not in omit:\n # all aggregate metadata should be shared with public read access, with these exceptions\n # Dashboards: metadata read\n # Category options: metadata read; data readwrite\n # all tracker packages should have no public access\n if 'public' not in item['sharing']:\n item['sharing']['public'] = \"\"\n item['sharing']['public'] = publicAccess\n # Make sure package admin is the owner\n if 'owner' in item['sharing']:\n item['sharing']['owner'] = package_admin_uid\n # Clean users, we share always with userGroups\n item['sharing']['users'] = {}\n # Process userGroups in sharing\n # Remove userGroups which are not part of the package\n if 'userGroups' in item['sharing']:\n userGroupIds = list(item['sharing']['userGroups'].keys())\n # The metadata must be shared with the default user groups in the standard way\n # If it is shared with a UG of the package which is not default we will try to figure out if it\n # belongs to ACCESS, CAPTURE or ADMIN and check that the sharing is correct\n # A UG sharing which does not belong to the package is removed\n for userGroupId in userGroupIds:\n if userGroupId not in userGroups_uids:\n logger.warning('Removing incorrect user group ' + userGroupId + ' sharing in object ' + uid + \" \" + name)\n item['sharing']['userGroups'].pop(userGroupId, None)\n else:\n if userGroupId in userGroup_sharing_object:\n source_sharing = item['sharing']['userGroups'][userGroupId]['access']\n target_sharing = userGroup_sharing_object[userGroupId]['access']\n if source_sharing != target_sharing:\n logger.warning(uid + \" \" + name + \" shared with user group \" +\n userGroups_codes[userGroupId] + \" with wrong access: \" +\n source_sharing + \" (Expected \" + target_sharing + \") ... Correcting\")\n item['sharing']['userGroups'][userGroupId]['access'] = target_sharing\n # Not a default sharing but still belongs to the package\n else:\n current_ug_code = userGroups_codes[userGroupId]\n default_found = next(\n (ug_sharing_default for ug_sharing_default in ['ADMIN', 'ACCESS', 'DATA_CAPTURE'] if\n ug_sharing_default in current_ug_code), \"\")\n if default_found:\n target_sharing = metadata_default_user_group_sharing.get(default_found, \"\")\n source_sharing = item['sharing']['userGroups'][userGroupId]['access']\n if source_sharing != target_sharing:\n logger.warning(uid + \" \" + name + \" shared with user group \" +\n userGroups_codes[userGroupId] + \" with wrong access: \" +\n source_sharing + \" (Expected \" + target_sharing + \") ... 
Correcting\")\n item['sharing']['userGroups'][userGroupId]['access'] = target_sharing\n # Add the default sharing\n # Exception: default (cat option, category, catcombo)\n if name.lower() != 'default':\n if 'userGroups' not in item['sharing']:\n item['sharing']['userGroups'] = dict()\n for key, value in userGroup_sharing_object.items():\n item['sharing']['userGroups'][key] = value\n\n\n return json_object", "def test_04_self_unshare_group(self):\n meowers = self.meowers\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))\n dog.uaccess.unshare_group_with_user(meowers, dog)\n self.assertFalse(dog in meowers.gaccess.edit_users)\n self.assertFalse(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [], dog.uaccess.get_group_unshare_users(meowers)))", "def test_share_00(self, mocker):\n contributors = ['alice@gmail.com:user:writer', 'alice@gmail.com:user:reader']\n g = GSheets(self.fake.file_path(depth=1, category=None, extension='json'), contributors)\n g.spreadsheet = Spreadsheet(None, None)\n g.spreadsheet.share = mocker.MagicMock()\n\n g.share()\n\n assert g.spreadsheet.share.call_count == len(contributors)", "def create_share_from_snapshot(self, context, share, snapshot,\n share_server=None):\n raise NotImplementedError()", "def test_logged_in_friend_not_in_group(self):\n\n self.make_logged_in_friend_not_in_group()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_ALLFRIENDS)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_ALLFRIENDS)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_ALLFRIENDS)", "def boxShare(client, file_id, email):\n\tfor i in file_id:\n\t\tclient.file(i).collaborate_with_login(email, role='VIEWER')", "def test_locked_asset_not_logged_in(self):\r\n self.client.logout()\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def test_review_story_restrict_to_only_admin(self):\n self.client.post('/api/stories', headers={'token': user_token}, data=json.dumps(story1))\n res = self.client.put('/api/stories/1/review', headers={'token': user_token}, data=json.dumps({\n 'status': 'Approved'\n }))\n result = json.loads(res.data.decode())\n self.assertEqual(result['message'], 'Permission denied')\n self.assertEqual(res.status_code, 403)", "def test_remove_share(self):\n self.app.delete(url=\"/config/shares?share=80&destination=gsiftp://nowhere&vo=dteam\", status=400)\n self.app.delete(url=\"/config/shares?share=80&destination=gsiftp://nowhere&vo=dteam&source=gsiftp://source\", status=204)", "def test_logged_in_owner(self):\n self.make_logged_in_owner()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual 
photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u, album.display_photo, ALBUM_PRIVATE)", "def test_unshare(self):\n\n # In the actual test, we'll want to confirm that a shared IP\n # address can be unshared from a group\n\n # Try to unshare from the group--fails for now (operation not\n # implemented in nova); note: change '10.0.0.1' to IP\n dtutil.assert_raises(novaclient.OpenStackException,\n self.server.unshare_ip, '10.0.0.1')", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def share_link(cls, user, link):", "def share_link(cls, user, link):", "def test_auth_public_owned(self):\n self.do_visible(True, 'pattieblack', True, tenant='pattieblack')", "def test_kyc_get_legal_share_holders(self):\n pass", "def every_non_existing_owner_should_not_have_profile_image_link(context):\n items = context.response.json()['items']\n for item in items:\n owner = item['owner']\n if not owner['user_type'] == 'does_not_exist':\n continue\n assert 'profile_image' not in owner\n logging.debug(\n 'Not existing Owner %s does not have a valid profile image link',\n owner['display_name'])" ]
[ "0.7160142", "0.71592975", "0.70918334", "0.70580095", "0.65058416", "0.635865", "0.6244558", "0.62139976", "0.6194017", "0.6169052", "0.61176854", "0.6066014", "0.605547", "0.59966147", "0.59790087", "0.59150696", "0.58924675", "0.5879898", "0.5861287", "0.57947785", "0.5792991", "0.5792887", "0.5688322", "0.56635785", "0.5662321", "0.5661412", "0.5624886", "0.5596383", "0.5578899", "0.5570365", "0.55654526", "0.554308", "0.5530934", "0.5513383", "0.5507529", "0.54877424", "0.54867136", "0.5476157", "0.54721653", "0.5471704", "0.5450875", "0.54425013", "0.5439752", "0.543695", "0.5434362", "0.54304624", "0.5411293", "0.54030454", "0.5395684", "0.53940636", "0.5383922", "0.53750753", "0.5368495", "0.5359598", "0.5355842", "0.5353832", "0.534498", "0.5343353", "0.53261125", "0.5305074", "0.5301745", "0.5289218", "0.5269868", "0.52687037", "0.52529746", "0.5243862", "0.5235717", "0.5223105", "0.5217525", "0.5210184", "0.52099216", "0.5208274", "0.5207316", "0.5204179", "0.5204133", "0.51830256", "0.5181889", "0.5171392", "0.51624125", "0.51556677", "0.5147286", "0.51242566", "0.5108104", "0.5102961", "0.51008064", "0.5099698", "0.5092738", "0.50910914", "0.50894684", "0.50856", "0.50823534", "0.5076233", "0.50745517", "0.50617313", "0.5041044", "0.5036629", "0.5036629", "0.5033449", "0.50310576", "0.50264496" ]
0.6919593
4
Init args with argparse
def init_args(): parser = argparse.ArgumentParser(description='Create xls for Tom') parser.add_argument('start', metavar='N', type=int, help='starting ' 'number') parser.add_argument('total_x', metavar='N', type=int, help='total number of x rows') parser.add_argument('total_y', metavar='N', type=int, help='total number of y columns') parser.add_argument('filename', metavar='NAME', default='test.csv', type=str, help='file name to write to, should end in ' 'csv') return parser.parse_args()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_args():\n parser = argparse.ArgumentParser(\n description=\"DeltaSherlock Client software.\")\n parser.add_argument('-v', '--version', action='version', version=VERSION)\n parser.add_argument('-c', '--config', action='store', dest='config_file',\n default='./config.ini', help=\"Path to config file. [default: \\\n %(default)s]\")\n parser.add_argument('-d', '--daemon', action='store_true', dest='daemon',\n default=False, help=\"Run in daemon mode. [default: \\\n %(default)s]\")\n return parser.parse_args()", "def setup_args(cls) -> ParlaiParser:\n # we want to later deprecate this for add_cmdline_args", "def setup_args(cls, parser):\n pass", "def add_args(self, parser):", "def __init__(self, args: argparse.Namespace):\n self._args = args", "def initialize():\n\n parser = argparse.ArgumentParser(\n description='This function takes a gene count file, a gene name, and \\\n an output file as parameters, and creates a file with the \\\n sample IDs and counts for that gene.')\n parser.add_argument('-i',\n '--data',\n type=str,\n help='The file name of the dataset.',\n required=True)\n parser.add_argument('-g',\n '--gene',\n type=str,\n help='The name of the target gene.',\n required=True)\n parser.add_argument('-o',\n '--output',\n type=str,\n help='The file name of the output file.',\n required=True)\n\n args_parse = parser.parse_args()\n\n return args_parse", "def parse_arguments(args):", "def setup_args() -> argparse.ArgumentParser:\n main_parser = argparse.ArgumentParser(prog=\"gh\")\n subparsers = main_parser.add_subparsers(dest=\"subparser\")\n command_parser = subparsers.add_parser(\"commands\", help=\"Runs a command\")\n command_parser.add_argument(\n \"choice\",\n help=\"The chosen command to run\",\n choices=gh.commands.OPTIONS.keys(),\n )\n analytics_parser = subparsers.add_parser(\"analytics\", help=\"Runs an analysis\")\n analytics_parser.add_argument(\n \"choice\",\n help=\"The chosen analysis to run\",\n choices=gh.analytics.OPTIONS.keys(),\n )\n return main_parser", "def init_arg_parser(args):\n arg_parser = argparse.ArgumentParser(\n description='Control node for the InMoov robot head. 
Receives movement commands and calculates trajectory.')\n\n arg_parser.add_argument('-p', '--showplot',\n action='store_true',\n dest='showplot',\n help='Triggers display of plot for calculated trajectory')\n\n arg_parser.add_argument('--version', action='version', version='%(prog)s 0.1a')\n\n args = arg_parser.parse_args(args)\n\n return args", "def setup_args():\n parser = ParlaiParser()\n parser.add_argument(\n '-n',\n '--num-episodes',\n default=-1,\n type=int,\n help='Total number of episodes to convert, -1 to convert all examples',\n )\n parser.add_argument(\n '-of',\n '--outfile',\n default=None,\n type=str,\n help='Output file where to save, by default will be created in /tmp',\n )\n parser.add_argument(\n '-s1id', '--speaker-0-id', type=str, help='Speaker id of agent who speaks first'\n )\n parser.add_argument(\n '-s1id',\n '--speaker-1-id',\n type=str,\n help='Speaker id of agent who speaks second',\n )\n parser.add_argument(\n '--prepended-context',\n type='bool',\n default=False,\n help='specify if the context is prepended to the first act',\n )\n parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=10)\n parser.set_defaults(datatype='train:ordered')\n\n return parser", "def _init_parser():\n\t\n\t_parser = argparse.ArgumentParser()\n\t_parser.add_argument(\"--pull\", help=\"pull scripts from UR3\", action=\"store_true\")\n\t_parser.add_argument(\"--create\", help=\"create data base from script files\", action=\"store_true\")\n\t_parser.add_argument(\"--clear\", help=\"clear all data base\", action=\"store_true\")\n\treturn _parser", "def set_args() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser( # type: argparse.ArgumentParser\n description=r'''\n -----------------------------------\n < Pull DNA barcodes from FASTQ files >\n -----------------------------------\n /\n \\ ______/ V`-, /\n } /~~\n /_)^ --,r'\n |b |b\n ''',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n add_help=False\n )\n # Arguments for verbosity and logging\n parser.add_argument( # Verbosity\n '-v',\n '--verbosity',\n dest='verbosity',\n type=str.lower,\n choices=_VERBOSITY_LEVELS,\n default=_VERBOSITY_DEFAULT,\n required=False,\n metavar='verbosity',\n help=\"Set the verbosity level, choose from '%s'; defaults to '%s'\" % (\"', '\".join(_VERBOSITY_LEVELS), _VERBOSITY_DEFAULT)\n )\n parser.add_argument( # Number of cores\n '--parallel',\n dest='num_cores',\n type=_num_cores,\n const=None,\n default=1,\n nargs='?',\n required=False,\n metavar='num jobs',\n help=\"Run %(prog)s in parallel; if passed, can optionally specify the number of jobs to run at once\"\n )\n parser.add_argument( # Output directory\n '-o',\n '--output-directory',\n dest='outdirectory',\n type=str,\n default=_OUTDIR_DEFAULT,\n required=False,\n metavar='output directory',\n help=\"Choose where all output files are to be stored; defaults to '%s'\" % _OUTDIR_DEFAULT\n )\n # Input arguments\n inputs = parser.add_argument_group(\n title='input arguments',\n description='Provide inputs for %(prog)s'\n )\n inputs.add_argument( # Forward FASTQ\n '-f',\n '--forward-fastq',\n dest='forward',\n type=str,\n default=None,\n required=True,\n metavar='FORWARD FASTQ',\n help=\"Provide a filepath for the forward/single FASTQ file\"\n )\n inputs.add_argument( # Reverse FASTQ\n '-r',\n '--reverse-fastq',\n dest='reverse',\n type=str,\n default=None,\n required=False,\n metavar='REVERSE FASTQ',\n help=\"Provide a filepath for the optional reverse FASTQ file\"\n )\n inputs.add_argument( # Sample sheet\n '-s',\n 
'--sample-sheet',\n dest='sample_sheet',\n type=str,\n default=None,\n required=True,\n metavar='SAMPLE SHEET',\n help=\"Provide a filepath for the sample sheet\"\n )\n inputs.add_argument( # Barcodes file\n '-b',\n '--barcodes',\n dest='barcodes',\n type=str,\n required=True,\n default=None,\n metavar='BARCODES',\n help=\"Provide a filepath for the barcodes CSV file\"\n )\n barcodes = parser.add_argument_group(\n title='barcode options',\n description=\"Set parameters for barcode demultiplexing\"\n )\n barcodes.add_argument( # Number of errors allowed\n '-e',\n '--error',\n dest='error',\n type=int,\n default=_ERROR_DEFAULT,\n required=False,\n metavar='ERROR',\n help=\"This is how many mismatches in the barcode we allowed before rejecting, defaults to %s\" % _ERROR_DEFAULT\n )\n return parser", "def setUp(self):\n self.parser = command_line.get_args()", "def _configure_args(self, parser: ArgumentParser) -> ArgumentParser:\n pass", "def setup_parser(self, parser, args):\r\n\r\n pass", "def setup_args():\n parser = argparse.ArgumentParser(\n description=\"Take probe set and generate MSA for all variants for \"\n \"each gene\")\n\n parser.add_argument(\n \"-o\", \"--output_path\",\n help=\"Directory to save the output to. Default: Current Directory\",\n type=str, default='.')\n\n parser.add_argument(\n \"-p\", \"--probe\",\n help=\"Path to the probe fasta.\",\n type=str,\n required=True)\n\n parser.add_argument(\n \"-g\", \"--gene_refs\",\n help=\"Directory where gene references are located.\",\n required=True,\n type=str)\n\n args = parser.parse_args()\n return args", "def Args(parser):", "def add_args(parser: argparse.ArgumentParser):\n pass", "def parse_args(args=None):\n\n\tparser = argparse.ArgumentParser(description=\"A simple python based static generator.\")\n\tparser.add_argument(\"--init\", action=\"store_true\", help=\"Initialize project.\")\n\tparser.add_argument(\"--gen\", action=\"store_true\", help=\"Generate static.\")\n\tparser.add_argument(\"--collect-static\", action=\"store_true\", help=\"Collect static.\")\n\n\tif args:\n\t\treturn parser.parse_args(args)\n\treturn parser.parse_args()", "def init(*, args: List[str]) -> None:\n logs.show_presentation()\n execute.parse_args(args=args)", "def parse_args():\n parser = ArgumentParser()\n parser.add_argument('-t', '--timer', action='store_true', \\\n help='Time the first random generation')\n parser.add_argument('-i', '--ibmq', default='', help='IBMQ token')\n parser.add_argument('-b', '--backend', default='', help='IBMQ backend')\n return parser.parse_args()", "def set_args():\n # Initialise argparse object\n parser = argparse.ArgumentParser(description='Set some arguments for our script')\n # Add some arguments, elements are: short form name, long form name, type of input expected\n # default value if you don't set an argument, help string (shown if you run with --help)\n # nargs is so that we can define multiple values for a single argument\n\n parser.add_argument('-q', '--query-terms', type=str, default='Venezuela Covid',\n help='list of strings to search for', nargs='*')\n\n parser.add_argument('-p', '--page-limit', type=int,\n help='number to limit search pages to')\n\n # set the argument parser and return\n args = parser.parse_args()\n return args", "def parse_args():\n\n parser = argparse.ArgumentParser(description='CLI to store Actisense-NGT Gateway values to InfluxDB and publish via MQTT')\n parser.add_argument('--config', '-c', type=str, required=True, help='JSON configuraton file with path')\n return 
parser.parse_args()", "def fill_args(args):\n args.agent_module = 'dstar_sgolam_walker'\n args.checkpoint_path = None\n args.exp_config = 'configs/baselines/dstar_proto_sgolam.yaml'\n args.num_episodes = 25\n \n return args", "def setup_args(self):\n self.parser = argparse.ArgumentParser()\n self.group = self.parser.add_mutually_exclusive_group()\n\n self.group.add_argument('-a', '--add', help='Adds a new task to the task list', action='store_true')\n self.group.add_argument('-r', '--remove', help='Removes a task from the task list', action='store_true')\n self.group.add_argument('-f', '--finish', help='Sets a task to be finished', action='store_true')\n self.group.add_argument('-u', '--unfinish', help='Sets a task to be not finished', action='store_true')\n self.group.add_argument('-c', '--change', help='Updates an existing task', action='store_true')\n self.group.add_argument('-v', '--view', help='View your current task list', action='store_true')\n\n return self.parser", "def _parse_args(self):\n parser = argparse.ArgumentParser()\n _, args = parser.parse_known_args()\n self.args = [a for a in args if a != '']", "def arg_parse():\n p = ap.ArgumentParser()\n p.add_argument()\n return p.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"auth\",\n help=\"authentication string for Infermedica API: \"\n \"APP_ID:APP_KEY or path to file containing it.\")\n parser.add_argument(\"--model\",\n help=\"use non-standard Infermedica model/language, \"\n \"e.g. infermedica-es\")\n args = parser.parse_args()\n return args", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('-u', '--urls_dirpath', type=unicode)\n parser.add_argument('-r', '--resources_dir', type=unicode)\n parser.add_argument('-t', '--total_docs', type=int)\n parser.add_argument('-m', '--mapping', type=unicode,\n help='File with the yago to lkif mapping')\n\n return parser.parse_args()", "def fill_args(cls, toolchain, parser):\n pass # pass must be overloaded (if required)", "def __init__(self):\n self.parser = argparse.ArgumentParser(prog='PROG')\n self.parser.add_argument(\"--idir\", action=\"store\",\n dest=\"idir\", default=\"\", help=\"Input data path\")\n self.parser.add_argument(\"--dates\", action=\"store\",\n dest=\"dates\", default=\"\", help=\"dates or dates-rante to read, e.g. YYYYMMDD-YYYYMMDD\")", "def parse_args():\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input_path\", required=True)\n parser.add_argument(\"-c\", \"--config\", required=True)\n return parser.parse_args()", "def main() -> None:\n init(args=sys.argv[1:])", "def load_args():\n parser = argparse.ArgumentParser(description=\"Classify and predict digits using the mnist dataset\")\n parser.add_argument('mode', help='the mode to run in: fit, model or predict')\n parser.add_argument('--algo', help='which algorithm to use: RandomForest, KNN')\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--start\",\n type=str,\n default=\"\",\n required=False,\n help=\"The start square of the agent, in the form row,col. 
If not specified, this is randomized\"\n )\n parser.add_argument(\n \"--actions\",\n type=str,\n required=True,\n nargs = '+',\n help=\"The actions the agent takes, comma delimited\"\n )\n parser.add_argument(\n \"--observations\",\n type=str,\n required=True,\n nargs = '+',\n help=\"The observations the agent makes, comma delimited\"\n )\n return parser.parse_args()", "def _prepare(self):\n # Customize commandline arguments\n parser = argparse.ArgumentParser()\n self.initArgumentParser(parser, defaults=self.default_binding_overrides)\n self.__options = parser.parse_args()\n self.__bindings.update(args_util.parser_args_to_bindings(self.__options))\n\n self.start_logging()", "def initialise(self, args, environ):", "def setup_cmd_args():\n parser = argparse.ArgumentParser(description=\"Translate XLS files to appropriate XML format for ingestion in FEDEO.\", formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"xlsfile\", help=\"The XLS file to parse\")\n parser.add_argument('-outputdir', help=\"directory to output the XML file\", default='output')\n parser.add_argument('-j', action='store_true', help=\"Also export JSON file\")\n parser.add_argument('-p', action='store_true', help=\"Pretty print XML file\")\n parser.add_argument('-o', action='store_true', help=\"Overwrite output XML file\")\n parser.add_argument('-l', action='store_true', help=\"Skip I_G_LN as mandatory field\", default=False)\n return parser.parse_args()", "def build_arg_parser():\n\n main = ArgumentParser(description='AMFinder command-line arguments.',\n allow_abbrev=False,\n formatter_class=RawTextHelpFormatter)\n\n subparsers = main.add_subparsers(dest='run_mode', required=True,\n help='action to be performed.')\n\n _ = training_subparser(subparsers)\n _ = prediction_subparser(subparsers)\n _ = diagnostic_subparser(subparsers)\n\n return main", "def parse_args():\n parser = argparse.ArgumentParser(description='baseline Mask R-CNN')\n parser.add_argument('--dataset', required=True,\n metavar=\"/path/to/dataset/\",\n help='Directory of the dataset')\n parser.add_argument('--continue_train', type=str, required=False, default='None',\n metavar=\"/path/to/latest/weights.h5\", help=\"Path to lastest training weights .h5 file\")\n parser.add_argument('--weight', required=False,\n metavar='/path/to/pretrained/weight.h5', help=\"Path to trained weight\")\n parser.add_argument('--image', required=False,\n metavar='/path/to/testing/image/directory', help=\"Path to testing image directory\")\n parser.add_argument('--video', required=False,\n metavar='/path/to/testing/image/directory', help=\"Path to testing image directory\")\n return parser.parse_args()", "def parser_setup():\n ap = argparse.ArgumentParser(description=__doc__)\n ap.add_argument(\"-c\", \"--config-dir\", default=\".\",\n help=\"Configuration directory. Contains YAML configuration\"\n \"files.\")\n ap.add_argument(\"-v\", \"--verbose\", action=\"count\", default=1,\n help=\"Print copious debugging info.\")\n ap.add_argument(\"-q\", \"--quiet\", action=\"count\", default=0,\n help=\"Suppress output. 
-qq to suppress ALL output.\")\n ap.add_argument(\"-p\", \"--profile\", default=\"all\",\n help=\"Dashboard profile to load from dashdef.yml\")\n ap.add_argument(metavar=\"HOST\", nargs=\"*\", dest=\"host_globs\",\n help=\"Host glob.\")\n return ap", "def __init_arg_parser() -> argparse.ArgumentParser:\n\n parser = argparse.ArgumentParser(\n description='Run this app to label images by appending the label at the front of the filename.')\n\n parser.add_argument('source_directory',\n type=str,\n help='Directory from where images will we be taken.'\n 'Subdirectories included.')\n\n parser.add_argument('destination_directory',\n type=str,\n help='Directory where labeled images will be stored.')\n parser.add_argument('-b', '--backup-directory',\n default=None,\n type=str,\n help='Directory to store images that were left unlabeled. Leaving this option will'\n 'add a label to the image as per the label formatter used in code.')\n\n return parser", "def _setup_arguments(self):\n\n self._parser.add_argument(\"-a\", \"--area-interest\",\n help=\"Area of interest to process, \"\n \"shapefile path\", required=True)\n # FUTURE VERSIONS\n # self._parser.add_argument(\"-s\", \"--srtm-dem\",\n # help=\"Path to SRTM DEM file. Zip format\",\n # required=False)\n # self._parser.add_argument(\"-y\", \"--hsheds-dem\",\n # help=\"Path to HSHEDS DEM file. Zip format\",\n # required=False)\n # self._parser.add_argument(\"-g\", \"--groves-file\",\n # help=\"Path to groves classification file. \"\n # \"Zip format\",\n # required=False)", "def init(argParser=\"default\", defaultLog=None, verbosity=0, **kwargs):\n\n if argParser is not None:\n if isinstance(argParser, str):\n # construct new parser based on command name\n args = _makeParser(argParser, **kwargs).parse_args()\n else:\n # use provided parser\n args = argParser.parse_args()\n\n defaultLog = None if not hasattr(args, \"log\") or args.log.lower() == \"none\" else args.log\n verbosity = args.verbose if hasattr(args, \"verbose\") else 0\n\n else:\n # don't parse anything, use default values\n args = None\n\n setupLogging(defaultLog, verbosity=verbosity)\n\n return args", "def __add_arguments__(cls, parser):", "def get_args():\n parser = argparse.ArgumentParser(\n description=\"Sets up package within the pheeno's directory.\"\n )\n\n # Required arguments\n parser.add_argument(\"-x\", \"--execute\", action=\"execute\", required=True,\n help=\"something\", default=False)\n\n # Optional arguments\n parser.add_argument(\"-s\", \"--save\", action=\"store\", required=False,\n help=\"something\", default=False)", "def parse_arguments(cls):\r\n parser = argparse.ArgumentParser(description='Easy Infer for model benchmark')\r\n cls.base_arg_parse(parser)\r\n cls.model_arg_parse(parser)\r\n cls.task_arg_parse(parser)\r\n args = parser.parse_args()\r\n return args", "def parse_args():\n parser = argparse.ArgumentParser(description=\"\")\n parser.add_argument(\n \"config_path\",\n type=str,\n help=\"Path to the JSON configuration file containing the image transformation settings.\",\n )\n parser.add_argument(\n \"img_path\",\n type=str,\n help=\"Path to the input image file to apply transformations.\",\n )\n return parser.parse_args()", "def get_args():\n parser = argparse.ArgumentParser(description=\"Arguments for data exploration\")\n parser.add_argument(\"--tokenize\",\n dest=\"tokenize\",\n action=\"store_true\",\n help=\"Tokenize by words and sentences, counting averages/sd for each.\")\n return parser", "def parse_args():\n parser = 
argparse.ArgumentParser(\"BaselineMembershipInferenceAttack\")\n parser.add_argument(\"--batch_size\",\n type=int, default=128,\n help=\"The batch size of normal training.\")\n parser.add_argument(\"--train_epoch\",\n type=int, default=10,\n help=\"The epoch of training.\")\n parser.add_argument(\"--train_lr\",\n type=float, default=0.0002,\n help=\"The learning rate of training.\")\n args = parser.parse_args()\n return args", "def _parse_args():\n parser = argparse.ArgumentParser(description=\"\")\n #parser.add_argument(\"args\", metavar=\"N\", type=str, nargs=\"*\", help=\"Positional arguments.\")\n #parser.add_argument(\"\", dest=\"\", type=\"\", default=, help=)\n #parser.add_argument(\"--version\", action=\"version\", version=\"<the version>\")\n\n return parser.parse_args()", "def _parse_args():\n parser = argparse.ArgumentParser(description='Run DAFI.')\n parser.add_argument('input_file', help='Name (path) of input file')\n return parser.parse_args()", "def _init_argparser():\n desc = 'SES status and metrics reporting utility (part of sasutils).'\n parser = argparse.ArgumentParser(description=desc)\n parser.add_argument('-d', '--debug', action=\"store_true\",\n help='enable debugging')\n\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument('-c', '--carbon', action='store_true',\n help='output SES Element descriptors metrics in a '\n 'format suitable for Carbon/Graphite')\n group.add_argument('-s', '--status', action='store_true',\n help='output status found in SES Element descriptors')\n\n group = parser.add_argument_group('output options')\n group.add_argument('--prefix', action='store',\n default='sasutils.ses_report',\n help='carbon prefix (example: \"datacenter.cluster\",'\n ' default is \"sasutils.ses_report\")')\n group.add_argument('-j', '--json', action='store_true',\n help='alternative JSON output mode')\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Bandits algorithms on a click-through \"\n \"rate dataset.\")\n parser.add_argument('--plot', action='store_true')\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n \n parser.add_argument('--p', dest='path_in',\n action='store', type=str, required=True, default='',\n help=\"Path relative to the data/ directory, to the input ATL01, ANC13, and ANC27 files.\")\n parser.add_argument('--atl01', dest='atl01_file',\n action='store', type=str, required=False, default=None,\n help=\"Path + filename to directory of the ATL01.\")\n parser.add_argument('--anc13', dest='anc13_path',\n action='store', type=str, required=False, default=None,\n help=\"Path to outputs directory of the ANC13.\") \n parser.add_argument('--anc27', dest='anc27_path',\n action='store', type=str, required=False, default=None,\n help=\"Path to directory of the ANC27.\")\n\n args = parser.parse_args()\n \n return args", "def command_line_arguments():\n\n try:\n parser = argparse.ArgumentParser(description='Log Handler/Cleaner/Copier for Idemia DocAuth')\n\n # Add required arguments.\n parser.add_argument('action', choices=['clean', 'download'], type=str, help='clean or download')\n\n # Parse the arguments\n args = parser.parse_args()\n\n return args\n\n except Exception as err:\n print(err)\n return", "def setup_options_parser(self, argparser):\n pass", "def parse_args(args=None):\n return AP.parse_args(args=args)", "def parse_arguments():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--accessions\", help=\"A json file with old/new 
family mapppings\")\n parser.add_argument(\"--add-header\", help=\"Print descriptive header\",\n action=\"store_true\", default=False)\n parser.add_argument(\"--add-links\", help=\"Creates hyperlinks to available Rfam html content\",\n action=\"store_true\", default=False)\n return parser", "def init(arg):\n arg.add_argument('-ls', '--list', help='List functions: Takes \"all\", \"partialName*\", \"exactName\"', type=str)\n arg.add_argument('-f', '--func', help='Function to call', type=str)\n arg.add_argument('-a', '--args', help='Argument list for the function', action='append', type=str)\n arg.add_argument('-kw', '--kwargs', help='Keyword arg list, like \"-kw k=b\" or \"-kw k=JSON\"', action='append', type=str)\n arg.add_argument('-q', help='Quiet, use if calling from a script', action='store_true')\n arg.add_argument('-?', dest=\"helpme\", help='Print help for function', action='store_true')\n arg.add_argument('--printResult', help='Print the return value, format if needed', choices=['str', 'json'], type=str, default=None)", "def prepare_arg_parser():\n\tparser = argparse.ArgumentParser(\n\t\tdescription=\"\"\"Ajout des ponctuations réelles dans un xml de \n\t\t refbibs (NB lent: ~ 2 doc/s sur 1 thread)\"\"\",\n\t\tusage=\"\"\"ragreage.py \n\t\t -x ech/tei.xml/oup_Human_Molecular_Genetics_ddp278.xml\n\t\t -p ech/pdf/oup_Human_Molecular_Genetics_ddp278.pdf\n\t\t -m [bibzone|biblines|bibfields|authornames]\"\"\",\n\t\tepilog=\"- © 2014-15 Inist-CNRS (ISTEX) romain.loth at inist.fr -\"\n\t\t)\n\t\n\t\n\tparser.add_argument('-x','--xmlin',\n\t\tmetavar='path/to/xmlfile',\n\t\thelp=\"\"\"\n\t\tpath to a TEI.xml with citations in <biblStruct> xml format \n\t\t(perhaps to be created from native XML by a call like \n\t\t`saxonb-xslt -xsl:tools/Pub2TEI/Stylesheets/Publishers.xsl\n\t\t-s:exemples_RONI_1513/rsc_1992_C3_C39920001646.xml`)'\"\"\",\n\t\ttype=str,\n\t\trequired=True,\n\t\taction='store')\n\t\t\n\t\n\t\n\tparser.add_argument('-p','--pdfin',\n\t\tmetavar='path/to/pdffile',\n\t\thelp=\"\"\"path to a pdf file of the same text, for attempted\n\t\t pdftottext and citation regexp match\"\"\",\n\t\ttype=str,\n\t\tdefault=None , # cf juste en dessous\n\t\taction='store')\n\t\n\tparser.add_argument('-t','--txtin',\n\t\tmetavar='path/to/txtfile',\n\t\thelp=\"\"\"pdfin can be replaced by a path to a txt flow.\n\t\tThis input text must be very close to the xml content\n\t\t(or segment thereof, in accordance with a chosen -m type)\"\"\",\n\t\ttype=str,\n\t\tdefault=None , # cf juste en dessous\n\t\taction='store')\n\t\t\n\t\n\t\n\tparser.add_argument('-m','--model-type',\n\t\tmetavar='name-of-model',\n\t\thelp=\"\"\"format output as a valid tei's 'listBibl' (default)\n\t\t or tailored to a Grobid crf model pseudotei input among:\n\t\t {'bibzone', 'biblines', 'bibfields', 'authornames'}\"\"\",\n\t\ttype=str,\n\t\tdefault='listBibl' ,\n\t\taction='store')\n\t\n\t\n\tparser.add_argument('-d','--debug',\n\t\tmetavar=1,\n\t\ttype=int,\n\t\thelp='logging level for debug info in [0-3]',\n\t\tdefault=0,\n\t\taction='store')\n\t\n\t\t\n\tparser.add_argument('-r', '--remainder',\n\t\tdest='mask',\n\t\thelp='show mask after matches instead of normal output',\n\t\taction='store_true')\n\t\n\treturn parser", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--log_path',\n type=str,\n default='../../logs/static',\n help='path of benchmark logs')\n parser.add_argument(\n '--standard_path',\n type=str,\n default='../../scripts/benchmark_ci/standard_value/static',\n help='path of 
standard_value')\n args = parser.parse_args()\n return args", "def _parse_args():\n args = sys.argv[1:]\n cmd_parser = argparse.ArgumentParser()\n cmd_parser.add_argument(\n '--produce-sub',\n dest='produce_sub',\n help='Produce submision file',\n default=False,\n action='store_true',\n )\n cmd_parser.add_argument(\n '--search-cv',\n dest='search_cv',\n help='Perform Search of parameters',\n default=False,\n action='store_true',\n )\n cmd_opts = cmd_parser.parse_args(args=args)\n return cmd_opts", "def parseArgs():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', default='fsod', help='training dataset') # use fsod dataset for default\n parser.add_argument('--cfg', dest='cfg_file', required=True, help='optional config file')\n parser.add_argument('--load_ckpt', help='path to load checkpoint')\n parser.add_argument('--load_detectron', help='path to load detectron weight pickle file')\n parser.add_argument('--output_dir', help='output directory to save the testing results.')\n parser.add_argument('--range', help='[start, end)', type=int, nargs=2)\n parser.add_argument('--visualize', dest='visualize', help='output images of detection', action='store_true')\n return parser.parse_args()", "def parse_command_line():\n parser = argparse.ArgumentParser()\n\n # All reference encoders\n parser.add_argument(\"--step\", dest=\"step\", default=\"10\", type=int, help=\"step size\")\n parser.add_argument(\"--repeats\", dest=\"repeats\", type=int, default=1, help=\"repeats\")\n\n parser.add_argument(dest=\"image\", default=None,\n help=\"select the test image to run\")\n\n args = parser.parse_args()\n return args", "def parse_args():\n parser = argparse.ArgumentParser(description=_program_description)\n parser.add_argument('input_file', help=_input_file_description)\n #parser.add_argument('-v', '--verbose', action='store_true', \n # default=False, help='show progress')\n args = parser.parse_args()\n return args", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--zarr_dir',\n type=str,\n help='path to directory of zarr files',\n )\n parser.add_argument(\n '--tiff_dir',\n type=str,\n help='path to directory of tiff files',\n )\n parser.add_argument(\n '--output_dir',\n type=str,\n help='path to directory for writing',\n )\n parser.add_argument(\n '--config_path',\n type=str,\n default=None,\n help='path to yaml preprocess config file',\n )\n \n args = parser.parse_args()\n return args", "def get_args():\n # create the parser\n parser = argparse.ArgumentParser()\n # Add the arguments to be parsed\n parser.add_argument(\"--num_rollouts\", type=int, default=1, help=\"Number of times to rollout agent in env\")\n parser.add_argument(\"--render\", choices=('True','False'), help=\"Render the rollout\")\n parser.add_argument(\"--seed\", type=int, default=4)\n parser.add_argument(\"--x_thresh\", type=float, default=1.5)\n args = parser.parse_args()\n args.render = True if args.render == 'True' else False\n\n return args", "def arg_parse():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"-f\",\n \"--fpath\",\n type=str,\n required=True,\n help=\"Path to files to generate test data from e.g. 
/badc/cmip5/data/cmip5/output1/MOHC/HadGEM2-ES/rcp85/mon/atmos/Amon/r1i1p1/latest/tas\",\n )\n\n parser.add_argument(\n \"-t\",\n \"--time_only\",\n default=False,\n help=\"Only generate one time step of this dataset\",\n action=\"store_true\",\n )\n\n parser.add_argument(\n \"-s\",\n \"--step\",\n type=int,\n default=100,\n help=\"Step to select latitude/longitude by. Only relevant when time_only is False\",\n )\n\n parser.add_argument(\n \"-n\",\n \"--number\",\n type=int,\n default=0,\n help=\"Number of files to generate. Default is all files. Only relevant when time_only is False\",\n )\n\n parser.add_argument(\n \"-l\",\n \"--level\",\n type=int,\n default=-1,\n help=\"Number of levels to extract, starting with index 0.\",\n )\n\n parser.add_argument(\n \"-c\", \"--compress\", help=\"Compress the files.\", action=\"store_true\"\n )\n\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description=\"SLOWFAST for AVA Dataset\")\n parser.add_argument(\"--pipeline\", type=str,\n default=\"../data/config/slowfast.pipeline\", help=\"SDK infer pipeline\")\n parser.add_argument(\"--data_dir\", type=str, default=\"../data/input\",\n help=\"Dataset contain frames and ava_annotations\")\n args_opt = parser.parse_args()\n return args_opt", "def update_args(self, args):\n self.args = self.parser.parse_args(args)", "def add_args(self): \n self.parser.add_argument('-u', '--username',\n default=None,\n help='the username for mongoDB (Default: None)')\n\n self.parser.add_argument('-p', '--password',\n default=None,\n help='the password for mongoDB (Default: None)')\n\n self.parser.add_argument('-d', '--database',\n default='grits',\n help='the database for mongoDB (Default: grits)')\n\n self.parser.add_argument('-m', '--mongohost',\n default='localhost',\n help='the hostname for mongoDB (Default: localhost)')\n\n self.parser.add_argument('-f', '--force', \n action='store_true',\n help='do not require confirmation to create indexes (Default: False)')", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Run NCF..\")\n parser.add_argument(\n \"--config_file\",\n nargs=\"?\",\n type=str,\n default=\"../configs/ncf_default.json\",\n help=\"Specify the config file name. 
Only accept a file from ../configs/\",\n )\n # If the following settings are specified with command line,\n # These settings will used to update the parameters received from the config file.\n parser.add_argument(\n \"--dataset\",\n nargs=\"?\",\n type=str,\n help=\"Options are: tafeng, dunnhunmby and instacart\",\n )\n parser.add_argument(\n \"--data_split\",\n nargs=\"?\",\n type=str,\n help=\"Options are: leave_one_out and temporal\",\n )\n parser.add_argument(\n \"--root_dir\", nargs=\"?\", type=str, help=\"working directory\",\n )\n parser.add_argument(\n \"--emb_dim\", nargs=\"?\", type=int, help=\"Dimension of the embedding.\"\n )\n parser.add_argument(\"--lr\", nargs=\"?\", type=float, help=\"Intial learning rate.\")\n parser.add_argument(\"--max_epoch\", nargs=\"?\", type=int, help=\"Number of max epoch.\")\n parser.add_argument(\n \"--batch_size\", nargs=\"?\", type=int, help=\"Batch size for training.\"\n )\n parser.add_argument(\"--optimizer\", nargs=\"?\", type=str, help=\"OPTI\")\n parser.add_argument(\"--activator\", nargs=\"?\", type=str, help=\"activator\")\n parser.add_argument(\"--alpha\", nargs=\"?\", type=float, help=\"ALPHA\")\n return parser.parse_args()", "def parse_args():\n parser = default_argument_parser()\n parser.add_argument(\"--label-map\",\n dest=\"label_map\",\n type=pathlib.Path,\n help=\"Label map in YAML format which maps from category \"\n \"ID to name.\")\n parser.add_argument(\"--train-csv\",\n dest=\"train_csv\",\n required=True,\n type=pathlib.Path,\n help=\"Path to training data CSV file.\")\n parser.add_argument(\"--valid-csv\",\n dest=\"valid_csv\",\n required=False,\n type=pathlib.Path,\n help=\"Optional path to validation data CSV file.\")\n parser.add_argument(\n \"--image-width\",\n type=int,\n help=\"Image width (optional, used to speed up dataset processing).\")\n parser.add_argument(\n \"--image-height\",\n type=int,\n help=\"Image height (optional, used to speed up dataset processing).\")\n return parser.parse_args()", "def _parse_args():\n parser = argparse.ArgumentParser(description=\"\")\n parser.add_argument('password' , type=bytearray)\n parser.add_argument('authenticator' , type=bytearray)\n parser.add_argument('encrypted_password' , type=bytearray)\n\n return parser.parse_args()", "def __init__(self, *args, **kwargs):\n argparse.ArgumentParser.__init__(self, *args, **kwargs)\n self.add_argument(\n '--log-level', env_var='COSA_LOG_LEVEL', default='info',\n choices=log._nameToLevel.keys(), help='Set the log level')", "def parse_args():\n global Args\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers()\n pars_simulation(subparsers)\n pars_analyze(subparsers)\n Args = parser.parse_args()", "def __parse_cmd_args():\n parser = argparse.ArgumentParser(description='Python Image Downloader.')\n parser.add_argument(\"-f\", \"--file\",\n help=\"Where the URL file is located.\")\n parser.add_argument(\"-d\", \"--dir\",\n help=\"Where the downloaded files are to be stored.\")\n args = parser.parse_args()\n return args", "def setup_cmd_args():\n parser = argparse.ArgumentParser(description=\"This program will query G-POD and COPHUB on the same datasets, in order to obtain the number of data results, compare them compile a report with the differences.\", formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n # parser.add_argument(\"root_dir\", help=\"The root directory containing data to check\")\n # parser.add_argument(\"--workspace\", help=\"Set Workspace manually\")\n parser.add_argument(\"--outputlist\", 
help=\"Folder to write the output lists with the un-synced products.\", default=\"c:\\\\temp\\\\\")\n parser.add_argument(\"--daysback\", help=\"Report with a given number of days back from today\", default=0)\n parser.add_argument(\"--dataset\", help=\"Set which dataset to query (chose S3A_SR_1_SRA_A_PREOPS or S3B_SR_1_SRA_A_NTC)\")\n parser.add_argument(\"--startdate\", help=\" The Start Date (format: YYYY-MM-DD) \", default=\"2016-06-01\")\n parser.add_argument(\"--enddate\",help=\" The End Date (format: YYYY-MM-DD)\")\n parser.add_argument(\"--cphubuser\",help=\"COPHUB username\", required=True)\n parser.add_argument(\"--cphubpw\",help=\"COPHUB password\", required=True)\n parser.add_argument(\"-email\", type=str, help=\"Email to send the results\", action=\"append\")\n parser.add_argument('-t', action='store_true', help=\"Today as enddate. Otherwise the last day of the previous month is considered.\")\n parser.add_argument('-n', action='store_true', help=\"Normal numeric check\")\n parser.add_argument('-m', action='store_true', help=\"Monthly check with product listing.\")\n return parser.parse_args()", "def parse_args():\n parser = ArgumentParser(\n description=\"This is a script for auto apply ipex optimization.\"\n \"\\n################################# Basic usage ############################# \\n\"\n \"\\n 1. Apply ipex optimization with fp32 data type\\n\"\n \"\\n >>> python -m intel_extension_for_pytorch.cpu.auto_ipex python_script args \\n\"\n \"\\n 2. Apply ipex optimization with bf16 data type\\n\"\n \"\\n >>> python -m intel_extension_for_pytorch.cpu.auto_ipex --dtype bfloat16 python_script args \\n\",\n formatter_class=RawTextHelpFormatter,\n )\n\n add_auto_ipex_params(parser, auto_ipex_default_enabled=True)\n\n # positional\n parser.add_argument(\n \"program\",\n type=str,\n help=\"The full path to the proram/script to be launched. \"\n \"followed by all the arguments for the script\",\n )\n # rest from the training program\n parser.add_argument(\"program_args\", nargs=REMAINDER)\n return parser.parse_args()", "def handle_cmdline_args():\n\n parser = argparse.ArgumentParser(\n description='Generate synthetic data from a specification in a json '\n 'file using the \"synth-method\" described in the json file. ')\n\n parser.add_argument(\n '-i', dest='infile', required=True,\n help='The input json file. 
Must contain a \"synth-method\" property')\n\n parser.add_argument(\n '-o', dest='outfile_prefix', required=True, help='The prefix of the output paths (data json and csv), relative to the QUIPP-pipeline root directory')\n\n args = parser.parse_args()\n return args", "def parse_arguments():\n\n args = Arguments()\n parser = argparse.ArgumentParser(\"Update river flow directions\")\n parser.add_argument('python_config_filename',\n metavar='python-config-filename',\n help='Full path to python configuration file',\n type=str)\n #Adding the variables to a namespace other than that of the parser keeps the namespace clean\n #and allows us to pass it directly to main\n parser.parse_args(namespace=args)\n return args", "def add_arguments(cls, arg_parser: ArgParser) -> None:", "def get_args():\n parser = argparse.ArgumentParser(\n description=\"\"\"Assemble raw reads using ABySS\"\"\"\n )\n parser.add_argument(\n \"--output\",\n required=True,\n action=FullPaths,\n default=None,\n help=\"\"\"The directory in which to store the assembly data\"\"\",\n )\n parser.add_argument(\n \"--kmer\", type=int, default=31, help=\"\"\"The kmer value to use\"\"\"\n )\n parser.add_argument(\n \"--cores\",\n type=int,\n default=1,\n help=\"\"\"The number of compute cores/threads to run with Trinity\"\"\",\n )\n parser.add_argument(\n \"--subfolder\",\n type=str,\n default=\"\",\n help=\"\"\"A subdirectory, below the level of the group, containing the reads\"\"\",\n )\n parser.add_argument(\n \"--verbosity\",\n type=str,\n choices=[\"INFO\", \"WARN\", \"CRITICAL\"],\n default=\"INFO\",\n help=\"\"\"The logging level to use\"\"\",\n )\n parser.add_argument(\n \"--log-path\",\n action=FullPaths,\n type=is_dir,\n default=None,\n help=\"\"\"The path to a directory to hold logs.\"\"\",\n )\n parser.add_argument(\n \"--clean\",\n action=\"store_true\",\n default=False,\n help=\"\"\"Cleanup all intermediate Trinity files\"\"\",\n )\n parser.add_argument(\n \"--abyss-se\",\n action=\"store_true\",\n default=False,\n help=\"\"\"Only use abyss-se\"\"\",\n )\n # one of these is required. The other will be set to None.\n input_data = parser.add_mutually_exclusive_group(required=True)\n input_data.add_argument(\n \"--config\",\n type=is_file,\n action=FullPaths,\n default=None,\n help=\"\"\"A configuration file containing reads to assemble\"\"\",\n )\n input_data.add_argument(\n \"--dir\",\n type=is_dir,\n action=FullPaths,\n default=None,\n help=\"\"\"A directory of reads to assemble\"\"\",\n )\n return parser.parse_args()", "def __parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--force', action=\"store_true\", default=False,\n help='overwrite existing database files during import')\n parser.add_argument('-e', '--extension', action=\"store\", default='txt',\n help='specify file extension. default is \"txt\"')\n parser.add_argument('-d', '--delimiter', action=\"store\", default='\\t',\n help='specify column delimiter. default is tab (\\\\t)')\n parser.add_argument('-m', '--mark', action=\"store\", default='.',\n help='specify decimal mark for numeric data. default is'\n ' dot (.)')\n parser.add_argument('-o', '--outformat', action=\"store\", default='npz',\n help='specify output database format. default is \"npz\"'\n ' for numpy database. 
use \"mat\" for matlab '\n ' database format.')\n parser.add_argument('-r', '--recursive', action=\"store_true\", default=False,\n help='recursively walk through all sub-directories of'\n ' current working directory')\n parser.add_argument('-p', '--pcs', action=\"store_true\", default=True,\n help='indicate if files are pcs files.')\n parser.add_argument('-c', '--colheadlines', action=\"store\", default='1',\n help='number of lines spanned by the column headers')\n args = parser.parse_args()\n return args", "def parseargs() -> argparse.ArgumentParser:\n\n parser = worker.parseargs(\"ACT hybrid-analysis.com Client\")\n\n parser.add_argument(\n \"--feed\", action=\"store_true\", help=\"Download the public feed only, no lookup\"\n )\n\n parser.add_argument(\n \"--apikey\", default=\"\", help=\"community apikey for hybrid-analysis.com\"\n )\n\n parser.add_argument(\n \"--user-agent\", default=\"Falcon Sandbox\", help=\"User agent while talking to API\"\n )\n\n parser.add_argument(\n \"--no-check-certificate\",\n action=\"store_true\",\n help=\"Do not check SSL certificate\",\n )\n\n return parser", "def init_argparser() -> ArgumentParser:\n parser = ArgumentParser()\n from_config = parser.add_argument_group('From config file', 'Provide full experiment setup via config file')\n from_config.add_argument('-c', '--config', help='Path to json file containing classification config.')\n from_cmd = parser.add_argument_group('From commandline', 'Specify experiment setup via commandline arguments')\n\n # Model options\n from_cmd.add_argument(\"--recoding_type\", type=str, default=None,\n choices=[\"mc_dropout\", \"surprisal\", \"ensemble\"],\n help=\"Recoding model type used for trainign. Choices include recoding based on MC Dropout,\"\n \"perplexity and anchored ensembles. If not specified, a vanilla model without recoding\"\n \"is used.\")\n from_cmd.add_argument(\"--step_type\", type=str, default=None, choices=[\"fixed\", \"mlp\", \"learned\"],\n help=\"Specifies the way the step size is determined when using a recoding model.\")\n from_cmd.add_argument(\"--step_size\", type=float,\n help=\"Step size for recoding in case the fixed step predictor is used.\")\n from_cmd.add_argument(\"--embedding_size\", type=int, help=\"Dimensionality of word embeddings.\")\n from_cmd.add_argument(\"--hidden_size\", type=int, help=\"Dimensionality of hidden states.\")\n from_cmd.add_argument(\"--num_layers\", type=int, help=\"Number of network layers.\")\n from_cmd.add_argument(\"--mc_dropout\", type=float, help=\"Dropout probability when estimating uncertainty.\")\n from_cmd.add_argument(\"--dropout\", type=float, help=\"Dropout probability for model in general.\")\n from_cmd.add_argument(\"--num_samples\", type=int, help=\"Number of samples used when estimating uncertainty.\")\n\n # Training options\n from_cmd.add_argument(\"--weight_decay\", type=float, help=\"Weight decay parameter when estimating uncertainty.\")\n from_cmd.add_argument(\"--prior_scale\", type=float,\n help=\"Prior length scale. 
A lower scale signifies a prior belief that the input data is \"\n \"distributed infrequently, a higher scale does the opposite.\")\n from_cmd.add_argument(\"--learning_rate\", type=float, help=\"Learning rate during training.\")\n from_cmd.add_argument(\"--batch_size\", type=int, help=\"Batch size during training.\")\n from_cmd.add_argument(\"--num_epochs\", type=int, help=\"Number of training epochs.\")\n from_cmd.add_argument(\"--clip\", type=float, help=\"Threshold for gradient clipping.\")\n\n # Corpus options\n from_cmd.add_argument(\"--corpus_dir\", type=str, help=\"Directory to corpus files.\")\n from_cmd.add_argument(\"--max_seq_len\", type=int, help=\"Maximum sentence length when reading in the corpora.\")\n\n # Screen output optins\n from_cmd.add_argument(\"--print_every\", type=int, help=\"Batch interval at which training info should be printed.\")\n from_cmd.add_argument(\"--eval_every\", type=int,\n help=\"Epoch interval at which the model should be evaluated on validation set.\")\n\n # Model saving and logging options\n from_cmd.add_argument(\"--model_name\", type=str, help=\"Model identifier.\")\n from_cmd.add_argument(\"--model_save_path\", type=str,\n help=\"Directory to which current best model should be saved to.\")\n from_cmd.add_argument(\"--device\", type=str, default=\"cpu\", help=\"Device used for training.\")\n from_cmd.add_argument(\"--log_dir\", type=str, help=\"Directory to write (tensorboard) logs to.\")\n\n return parser", "def command_line_args(parser):\n AbyssAssembler.command_line_args(parser)\n SpadesAssembler.command_line_args(parser)\n TrinityAssembler.command_line_args(parser)\n VelvetAssembler.command_line_args(parser)", "def __init__(self, *args, **kwargs):\n argparse.ArgumentParser.__init__(self, *args, **kwargs)\n self.add_argument(\n '--log-level', env_var='COSA_LOG_LEVEL', default='INFO',\n choices=log._nameToLevel.keys(), help='Set the log level')", "def init():\n global opts\n global args\n \n # get options\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"hvs:b:n:\", [\"--help\", \"--version\", \n \"--suffix=\", \"--begin=\", \"--name=\"])\n except getopt.GetoptError as err:\n print str(err)\n getHelp()\n sys.exit()\n else:\n parseOpts()", "def parse_args(args):\n\n parser = argparse.ArgumentParser(\n description=\"\"\"Generates and runs an afni_proc.py script to preprocess resting state fMRI data\"\"\",\n formatter_class=argparse.RawDescriptionHelpFormatter)\n\n # Optional Flags\n parser.add_argument(\"-t\", \"--trs_remove\", action=\"store\", default=5, type=int, metavar='TRs',\n help=\"\"\"number of trs to remove at the beginning of the epi data\n (default = 5 trs)\"\"\")\n parser.add_argument(\"-d\", \"--dim_voxel\", action=\"store\", default=2.0, type=float, metavar='MM',\n help=\"voxel dimensions in mm that processed epi will be resampled to (default = 2.0 mm)\")\n parser.add_argument(\"-b\", \"--bandpass\", action=\"store\", default=[0.01, 0.25], nargs=2, type=float, metavar=\"F\",\n help=\"bandpass frequencies lower and upper limits (default = 0.01 0.25)\")\n parser.add_argument(\"-v\", \"--volumes\", action=\"store\", default=0, type=int, metavar=\"V\",\n help=\"\"\"truncate the epi data to the inputted number of volumes, useful if subjects have data \n with different numbers of volumes (default = no truncation)\"\"\")\n parser.add_argument(\"-f\", \"--fwhm\", action=\"store\", default=5.0, type=float, metavar=\"MM\",\n help=\"the full width half maximum that is used when blurring (default = 5.0 mm)\")\n parser.add_argument(\"-c\", 
\"--cores\", action=\"store\", default=cpu_count(), type=int, metavar=\"C\",\n help=\"number of cores supplied to 3dDeconvolve (default = all cores)\")\n parser.add_argument(\"-s\", \"--subj_id\", action=\"store\", default=\"sub\", metavar=\"SUB\",\n help=\"text file of subject ids (default = sub)\")\n parser.add_argument(\"-T\", \"--time_step\", action=\"store\", default=0, type=float, metavar=\"TS\",\n help=\"set the time step for bandpassing (default = ts in header info\")\n\n parser.add_argument(\"-g\", \"--global_signal_regression\", action=\"store_false\", default=True,\n help=\"do not perform global signal regression (default = perform gsr)\")\n\n parser.add_argument(\"-r\", \"--rerun\", action=\"store_true\", default=False,\n help=\"\"\"rerun preprocessing, override and delete previous results in \n 'Processed' folder (default = don't override)\"\"\")\n parser.add_argument(\"-m\", \"--motion_param\", action=\"store_true\", default=False,\n help=\"use 12 motion parameters for regression (default = 6 motion parameters)\")\n parser.add_argument(\"-G\", \"--gm_blur\", action=\"store_true\", default=False,\n help=\"blur only in grey matter mask (default = blur in whole brain)\")\n parser.add_argument(\"-n\", \"--nl_reg\", action=\"store_true\", default=False,\n help=\"use non-linear warp between anatomical and MNI template (default = linear warp)\")\n\n # Required Inputs\n required = parser.add_argument_group(\"required arguments\")\n required.add_argument(\"-e\", \"--epi\", action=\"store\", required=True,\n help=\"text file of paths to raw epi data\")\n required.add_argument(\"-a\", \"--anat\", action=\"store\", required=True,\n help=\"text file of paths to raw anatomical data\")\n required.add_argument(\"-o\", \"--out_dir\", action=\"store\", required=True, metavar=\"OUT\",\n help=\"text file of paths to output directory\")\n result = parser.parse_args(args)\n\n # Make sure inputted parameters are legal\n assert (os.path.isfile(result.epi)), \"{} does not exist or is not a file\".format(result.epi)\n assert (os.path.isfile(result.anat)), \"{} does not exist or is not a file\".format(result.ant)\n assert (result.trs_remove >= 0), \"Cannot remove negative trs\"\n assert (result.dim_voxel >= 0), \"Cannot have a negative voxel dimension\"\n assert (np.all(np.array(result.bandpass) > 0)), \"Cannot have a negative frequency limit for bandpassing\"\n assert (result.volumes > -1), \"Number of volumes must be greater than 0\"\n assert (result.cores > 0), \"Number of cores used must be greater than 0\"\n assert (result.time_step > -1), \"Time step must be greater than 0\"\n\n return result", "def _setup_argument_parser(self, argument_parser):\n pass", "def get_argumets():\n\n parser = argparse.ArgumentParser( description='Interactive 6DoF pose annotator')\n parser.add_argument('--cimg', type=str, default='data/rgb.png',\n help='file name of the RGB image of the input scene.')\n parser.add_argument('--dimg', type=str, default='data/depth.png',\n help='file name of the depth image of the input scene. 
We assume that RGB and depth image have pixel-to-pixel correspondence.')\n parser.add_argument('--intrin', type=str, default='data/realsense_intrinsic.json',\n help='file name of the camera intrinsic.')\n parser.add_argument('--model', type=str, default='data/hammer_mm.ply',\n help='file name of the object model (.pcd or .ply).')\n parser.add_argument('--init', type=str, default='data/init.json',\n help='file name of the initial transformation (.json).')\n \n return parser.parse_args()", "def add_step_args(cls, parser):", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"in_fq\", help=\"The fastq file containing Hi-C reads.\")\n parser.add_argument(\n \"-r\",\n \"--reference\",\n required=True,\n help=\"Path to the reference genome, in FASTA format.\",\n )\n parser.add_argument(\n \"-p\",\n \"--nb_processors\",\n default=1,\n type=int,\n help=\"number of CPUs used for alignment.\",\n )\n parser.add_argument(\n \"-o\",\n \"--out_sam\",\n help=\"Path to the output SAM file for the alignment of in_fq.\",\n )\n parser.add_argument(\n \"-T\",\n \"--tempdir\",\n default=\".\",\n help=\"Directory to write temporary files. Defaults to current directory.\",\n )\n parser.add_argument(\n \"-m\",\n \"--minimap2\",\n default=False,\n action=\"store_true\",\n help=\"Use minimap2 instead of bowtie for the alignment.\",\n )\n parser.add_argument(\n \"-l\",\n \"--min_len\",\n type=int,\n default=20,\n help=\"Minimum length to which reads should be truncated.\",\n )\n return parser.parse_args()", "def add_arguments(self, parser):", "def __parse_args(self):\n for argument in self.args:\n source_arg = re.match(\"^(--source=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n input_arg = re.match(\"^(--input=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n stats_arg = re.match(\"^(--stats=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n help_arg = re.match(\"^--help$\", argument)\n vars_arg = re.match(\"^--vars$\", argument)\n insts_arg = re.match(\"^--insts$\", argument)\n if source_arg:\n self.sourceFile = source_arg.group(2)\n self.passedArgs.append(\"source\")\n elif input_arg:\n self.inputFile = input_arg.group(2)\n self.passedArgs.append(\"input\")\n elif help_arg:\n print(\"napoveda\")\n sys.exit(0)\n elif stats_arg:\n self.statsFile = stats_arg.group(2)\n self.passedArgs.append(\"stats\")\n elif vars_arg:\n self.passedArgs.append(\"vars\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"vars\"\n elif insts_arg:\n self.passedArgs.append(\"insts\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"insts\"\n else:\n raise ArgError(\"Unknown argument or format of the argument! (\" + argument + \")\")", "def parse_args():\n parser = argparse.ArgumentParser(description='Google reminders cli',\n epilog=usage,\n formatter_class=argparse.RawTextHelpFormatter)\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n argparse.ArgumentParser(argument_default=False)\n parser.add_argument('-r', type=str, required=True, metavar='[reference.fa]', help=\"Reference file in fasta format\")\n parser.add_argument('-s', type=int, metavar='number of SNPs', help=\"Number of SNPs to introduce\")\n parser.add_argument('-i', type=int, metavar='number of indels', help=\"Number of small indels to introduce. Indel sizes randomly sampled between 1-100bp with a ratio of 9:1 for size being < 11bp\")\n parser.add_argument('-d', type=int, metavar='number of large deletions', help=\"Number of large deletions to introduce. 
Deletion size randomly sampled between 100-5000bp\")\n parser.add_argument('-t', type=int, metavar='number of translocations', help=\"number of translocations, regions will be randomly selected ranging between 500-5000bp in size\")\n parser.add_argument('-nc', type=bool, default=False, metavar='add at least one non-conserved translocation', help=\"True/False add one non-conserved translocation. Deletion occurs from end of the translocation. Default=False\")\n parser.add_argument('-f', type=str, required=True, metavar='output fasta filename', help=\"Output fasta filename and file path\")\n parser.add_argument('-v', type=str, required=True, metavar='output vcf file of mutations', help=\"Output vcf filename and file path\")\n\n return parser.parse_args()", "def __init__(self, *args, **kwargs):\n Cli.__init__(self, *args, **kwargs)\n # Set common arguments\n self.add_argument(\n '--build', default='latest',\n help='Override build id, defaults to latest')\n self.add_argument(\n '--buildroot', default='builds', help='Build diretory')\n self.add_argument(\n '--dump', default=False, action='store_true',\n help='Dump the manfiest and exit')" ]
[ "0.79507685", "0.7793449", "0.77239007", "0.77109313", "0.76677924", "0.7543103", "0.7493199", "0.7482822", "0.7471515", "0.74647325", "0.74401", "0.73976904", "0.7346049", "0.732926", "0.7325903", "0.73169327", "0.7307563", "0.7270258", "0.72573745", "0.7214987", "0.7203224", "0.71864635", "0.7180025", "0.71503913", "0.71410227", "0.71085864", "0.7107059", "0.710331", "0.71027225", "0.70999277", "0.7092108", "0.70914614", "0.7090357", "0.7082141", "0.7068388", "0.70662135", "0.70527285", "0.70497996", "0.7042134", "0.7041293", "0.703141", "0.7023519", "0.70136696", "0.7013645", "0.70132375", "0.7013032", "0.70112026", "0.70111406", "0.70101786", "0.70101404", "0.6996935", "0.698885", "0.6985885", "0.69795245", "0.69778925", "0.6970902", "0.6965431", "0.69644934", "0.69615006", "0.6960638", "0.6954423", "0.69542617", "0.6953587", "0.6949521", "0.69354856", "0.693306", "0.69251204", "0.691291", "0.69087034", "0.6901205", "0.6900517", "0.68996763", "0.6890615", "0.6882952", "0.6881311", "0.68786967", "0.6878656", "0.68780565", "0.6877944", "0.68747777", "0.68716574", "0.6871107", "0.6870824", "0.68661714", "0.6864352", "0.68631583", "0.68614805", "0.68589854", "0.6856188", "0.68545085", "0.6846265", "0.6845903", "0.68444693", "0.6829222", "0.68290955", "0.6829037", "0.68267864", "0.68247724", "0.6820562", "0.6818502" ]
0.76917267
4
loads file FILTER, returns filter matrix
def load_filter():
    if not os.path.isfile(FILTER):
        print('no filter found, creating square grid')
        return []
    with open(FILTER, 'r') as ff:
        reader = csv.reader(ff)
        l = list(reader)
    ar = numpy.asarray(l)
    # ar = numpy.transpose(ar, (0, 1))
    # ar = numpy.flip(ar, 1)
    # ar = numpy.rot90(ar, k=3, axes=(0, 1))
    # ar = numpy.swapaxes(ar, 0, 1)
    f = list(map(list, ar))
    return f
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_filter_file(self, file_path): \n self._pop_all_self()\n self.filter_list = []\n self.file_path = file_path \n \n with codecs.open(self.file_path, 'r', encoding='cp1252') as fid: \n for k, line in enumerate(fid):\n line = line.lstrip('\\n\\r ')\n if line.startswith('#'):\n continue \n split_line = [item.strip() for item in line.split('\\t')]\n if k==0:\n # Header\n header = split_line\n else:\n line_dict = dict(zip(header, split_line))\n self[line_dict['variable']] = SingleFilter(line_dict, self.parameter)\n\n # Save attributes\n for item in self.keys():\n setattr(self, item, self[item])\n \n self.header = sorted(header)\n \n if self.filter_type == 'data':\n self.year_list = [y for y in range(self['YEAR_INTERVAL'].value[0], \n self['YEAR_INTERVAL'].value[1]+1)]", "def _read_filter_data(filename):\n gains = []\n freqs = []\n freq_scale = 0\n with open(filename) as f:\n for line in f:\n words = line.split()\n if line.startswith('Freq'):\n _, scale = words[0].split(\"(\")\n scale = scale.rstrip(\")\")\n if scale==\"Hz\":\n freq_scale = 1\n elif scale==\"kHz\":\n freq_scale = 1e3\n elif scale==\"MHz\":\n freq_scale = 1e6\n elif scale==\"GHz\":\n freq_scale = 1e9\n else:\n raise ValueError(\"Cannot parse line: '\"+line+\"'\")\n elif len(words)==3 and words[0]!=\"Total\":\n f, g, p = line.split(\",\")\n freq = float(f) * freq_scale\n gain = float(g)\n phase = float(p)\n freqs.append(freq)\n gains.append(gain * np.exp(1j*phase))\n\n return np.array(gains), np.array(freqs)", "def _load_filter(self, fname, interp=True, lamb=None):\n ftab = self.hdf\n if hasattr(fname, 'decode'):\n fnode = ftab.get_node('/filters/' + fname.decode('utf8'))\n else:\n fnode = ftab.get_node('/filters/' + fname)\n flamb = fnode[:]['WAVELENGTH']\n transmit = fnode[:]['THROUGHPUT']\n dtype = 'photon'\n unit = None\n\n attrs = fnode.attrs\n if 'DETECTOR' in attrs:\n dtype = attrs['DETECTOR']\n if 'WAVELENGTH_UNIT' in attrs:\n unit = attrs['WAVELENGTH_UNIT']\n\n fil = UnitFilter(flamb, transmit, name=fnode.name,\n dtype=dtype, unit=unit)\n\n if interp & (lamb is not None):\n fil = fil.reinterp(lamb)\n return fil", "def read_filter(filter_file):\n\n fd = open(filter_file, \"r\")\n lines = fd.readlines()\n fd.close()\n\n wavelengths = []\n weights = []\n for line in lines:\n line = line.strip()\n words = line.split()\n wavelengths.append(float(words[0]))\n weights.append(float(words[1]))\n\n return (wavelengths, weights)", "def parseFilter(filterList):\n filter_mat = None\n for line in filterList:\n try:\n line = np.array([float(x) for x in line.split()])\n if line.shape[0] != len(filterList):\n raise Exception(\"Filter must be square, pad with zeroes if you need a non-square filter\")\n\n if filter_mat is None:\n filter_mat = line\n else:\n filter_mat = np.vstack((filter_mat,line))\n except ValueError:\n logging.fatal(\"Invalid configuration: filter must contain only numbers\"); exit()\n except Exception as e:\n logging.fatal(e); exit()\n return filter_mat", "def unpack(self, filter_file_type=\".dat\", verbose=False):\n\n if hasattr(self, \"phot\"):\n filter_names = np.unique(self.phot[\"filter\"])\n\n self.phot.add_index('filter', unique = True)\n\n\n for filter_name in filter_names:\n\n phot_table = self.phot.loc[\"filter\", filter_name]\n filter_filename = filter_name + filter_file_type\n if verbose: print(filter_filename)\n if verbose: print(phot_table)\n if verbose: print(type(filter_name), type(filter_file_type))\n\n # phot_table.meta = {\"filter_filename\": filter_filename}\n 
phot_table.meta[\"filter_filename\"] = filter_filename\n if not isinstance(phot_table, Row):\n # if len(np.unique(self.phot.loc[\"filter\", filter_name][\"MJD\"])) > 1:\n indices = phot_table.argsort(\"MJD\")\n # for column_name in phot_table.colnames:\n # phot_table[column_name] = phot_table[column_name][indices]\n sorted_phot_table = Table([phot_table[column_name][indices] for column_name in phot_table.colnames])\n else:\n sorted_phot_table = phot_table\n\n filter_key = np.unique(phot_table[\"filter\"])[0]\n\n if len(np.unique(phot_table[\"filter\"])) > 1 or filter_key != filter_name:\n raise errors.FilterMismatchError(\"There is a more than one filterdata in here! or there is a mismatch with filename\")\n path_to_filter = os.path.join(self.filter_directory, phot_table.meta['filter_filename'])\n\n # def load_filter(path, cmap = False, verbose = False):\n #\n if utils.check_file_path(os.path.abspath(path_to_filter)):\n filter_object = FilterClass()\n filter_object.read_filter_file(os.path.abspath(path_to_filter), verbose = verbose)\n filter_object.calculate_AB_zp()\n else:\n warnings.warn(\"Couldn't load the filter\")\n\n self.data_filters[filter_key] = filter_object\n\n self.data[filter_name] = sorted_phot_table\n\n self.filter_names = filter_names\n\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n\n pass", "def __init__(self, file_name):\n self.file_name = file_name\n\n self.A = 1\n self.B = 0\n self.C = 1\n self.R = FILTER_R\n self.Q = FILTER_Q\n\n self.data_stream = []", "def load_filters(self):\n buffer_dict = dict(self.named_buffers())\n n = 0\n\n for k in self.phi_f.keys():\n if type(k) != str:\n self.phi_f[k] = buffer_dict['tensor' + str(n)]\n n += 1\n\n for psi_f in self.psi1_f:\n for sub_k in psi_f.keys():\n if type(sub_k) != str:\n psi_f[sub_k] = buffer_dict['tensor' + str(n)]\n n += 1\n\n for psi_f in self.psi2_f:\n for sub_k in psi_f.keys():\n if type(sub_k) != str:\n psi_f[sub_k] = buffer_dict['tensor' + str(n)]\n n += 1", "def _load_filter(self, fname, **kwargs):\n with self as current_lib:\n return UnitLickIndex(fname, current_lib._content[fname])", "def _load_filter(self, fname, **kwargs):\n with self as s:\n return LickIndex(fname, s._content[fname])", "def _load_filter(self, fname, interp=True, lamb=None, *args, **kwargs):\n try:\n fil = UnitFilter.from_ascii(fname, *args, **kwargs)\n except Exception:\n content = self.content\n r = [k for k in content if fname in k]\n\n if len(r) <= 0: # try all lower for filenames (ascii convention)\n r = [k for k in content if fname.lower() in k]\n\n if len(r) > 1:\n print(\"auto correction found multiple choices\")\n print(r)\n raise ValueError('Refine name to one of {0}'.format(r))\n elif len(r) <= 0:\n raise ValueError('Cannot find filter {0}'.format(fname))\n else:\n fil = UnitFilter.from_ascii(r[0], *args, **kwargs)\n if (interp is True) and (lamb is not None):\n return fil.reinterp(lamb)\n else:\n return fil", "def read_flt(input_file):\n\n if input_file.endswith('.flt') or input_file.endswith('.hdr'):\n input_file = input_file[:-4]\n else:\n print 'Incorrect filename'\n return 0,0 #exits module gracefully\n\n headers = read_headers(input_file)\n\n #read the data as a 1D array and reshape it to the dimensions in the header\n raster_array = read_bin(input_file).reshape(int(headers[1]), int(headers[0]))\n raster_array = raster_array.reshape(int(headers[1]), int(headers[0])) #rows, columns\n\n return raster_array, headers", "def _read_filters(self, path):\n blob = 
utils.read_blob_file_contents(path)\n try:\n rules = json.loads(blob)\n except ValueError as e:\n msg = _(\n \"An error occurred when reading filters from file \"\n \"%(path)s: %(error)s\"\n ) % {\"path\": path, \"error\": e}\n raise exceptions.CommandError(msg)\n else:\n return rules", "def getFileAsFiltFloatMatrix(dirPath, filt, columns, delim=\",\"):\n\tmat = list()\n\tfor rec in fileFiltSelFieldsRecGen(dirPath, filt, columns, delim):\n\t\tmat.append(asFloatList(rec))\n\treturn mat", "def load_embedded(index, filename):\n weights = list()\n input_folder = os.path.join('input_files', 'embedded_matix')\n with open(os.path.join(input_folder, filename), 'r') as csvfile:\n filereader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n for row in filereader:\n cat_ix = int(row[0])\n if index[cat_ix] == row[1].strip():\n weights.append([float(x) for x in row[2:]])\n csvfile.close()\n return np.array(weights)", "def load_filter(filename):\n # parse config file\n if not os.path.isfile(filename):\n raise IOError('File \"%s\" does not exist' % filename)\n try:\n f = open(filename)\n except IOError:\n raise IOError('Could not open file \"%s\"' % filename)\n\n cfg_items = []\n for (i, line) in enumerate(f):\n try:\n # remove all comments and unnecessary whitespace\n normalizer = shlex.shlex(line)\n normalizer.wordchars += '.-'\n normal_line = ' '.join([t for t in normalizer])\n if normal_line:\n # split up normalized line and build dictionary\n cfg_item = {}\n for part in normal_line.split(','):\n cfg_split = shlex.split(part)\n key = cfg_split.pop(0)\n value = cfg_split\n cfg_item[key] = value\n cfg_items.append(cfg_item)\n except (IndexError, ValueError):\n raise RuntimeError( \\\n 'Could not parse line %i of file \"%s\"' % (i, filename))\n\n # look for global bit settings\n bits_global = None\n factor_bits_global = None\n norm_bits_global = None\n for cfg_item in cfg_items:\n if 'bits_global' in cfg_item:\n if bits_global is None:\n [bits_global] = cfg_item.pop('bits_global')\n bits_global = int(bits_global)\n else:\n raise RuntimeError( \\\n 'bits_global must not be specified more than once')\n if 'factor_bits_global' in cfg_item:\n if factor_bits_global is None:\n [factor_bits_global] = cfg_item.pop('factor_bits_global')\n factor_bits_global = int(factor_bits_global)\n else:\n raise RuntimeError( \\\n 'factor_bits_global must not be specified more than once')\n if 'norm_bits_global' in cfg_item:\n if norm_bits_global is None:\n [norm_bits_global] = cfg_item.pop('norm_bits_global')\n norm_bits_global = int(norm_bits_global)\n else:\n raise RuntimeError( \\\n 'norm_bits_global must not be specified more than once')\n\n # remove empty items from cfg_items, only node definitions should be left\n cfg_items = filter(None, cfg_items)\n\n # look for filter nodes\n filter_nodes = {}\n adjacency = {}\n input_node = None\n output_node = None\n for cfg_item in cfg_items:\n # mandatory settings\n try:\n [node] = cfg_item['node']\n except KeyError:\n raise RuntimeError('Node type not specified')\n try:\n [name] = cfg_item['name']\n except KeyError:\n raise RuntimeError('Name not specified')\n # optional settings\n if 'bits' in cfg_item:\n [bits] = map(int, cfg_item['bits'])\n else:\n bits = bits_global\n if 'connect' in cfg_item:\n connect = cfg_item['connect']\n else:\n connect = []\n if 'input' in cfg_item:\n if input_node is None:\n input_node = name\n else:\n raise RuntimeError('More than one input node specified')\n if 'output' in cfg_item:\n if output_node is None:\n output_node = name\n else:\n 
raise RuntimeError('More than one output node specified')\n\n # make filter node\n if name not in filter_nodes:\n if bits is not None:\n if node == 'Const':\n filter_nodes[name] = Const(bits)\n elif node == 'Add':\n filter_nodes[name] = Add(bits)\n elif node == 'Delay':\n filter_nodes[name] = Delay(bits)\n elif node == 'Multiply':\n if 'factor_bits' in cfg_item:\n [factor_bits] = cfg_item['factor_bits']\n factor_bits = int(factor_bits)\n else:\n factor_bits = factor_bits_global\n if 'norm_bits' in cfg_item:\n [norm_bits] = cfg_item['norm_bits']\n norm_bits = int(norm_bits)\n else:\n norm_bits = norm_bits_global\n if (factor_bits is not None and norm_bits is not None):\n filter_nodes[name] = Multiply(\n bits, factor_bits, norm_bits)\n if 'factor' in cfg_item:\n [factor] = cfg_item['factor']\n factor = float(factor)\n filter_nodes[name].set_factor(factor, norm=True)\n else:\n raise ValueError('Unknown node type: %s' % node)\n else:\n raise RuntimeError('Number of bits for node \"%s\" not specified' \\\n % name)\n adjacency[name] = connect\n else:\n raise RuntimeError('Node \"%s\" already present' % name)\n\n # make filter\n if input_node is None:\n raise RuntimeError('No input node specified')\n elif output_node is None:\n raise RuntimeError('No output node specified')\n else:\n return Filter(filter_nodes, adjacency, input_node, output_node)", "def add_filters(fnames):\n with Database(writable=True) as base:\n for fname in fnames:\n with open(fname, 'r') as f_fname:\n filter_name = f_fname.readline().strip('# \\n\\t')\n filter_type = f_fname.readline().strip('# \\n\\t')\n filter_description = f_fname.readline().strip('# \\n\\t')\n filter_table = np.genfromtxt(fname)\n # The table is transposed to have table[0] containing the\n # wavelength and table[1] containing the transmission.\n filter_table = filter_table.transpose()\n # We convert the wavelength from Å to nm.\n filter_table[0] *= 0.1\n\n print(\"Importing {}... 
({} points)\".format(filter_name,\n filter_table.shape[1]))\n\n new_filter = Filter(filter_name, filter_description, filter_type,\n filter_table)\n\n # We normalise the filter and compute the effective wavelength.\n # If the filter is a pseudo-filter used to compute line fluxes, it\n # should not be normalised.\n if not filter_name.startswith('PSEUDO'):\n new_filter.normalise()\n else:\n new_filter.effective_wavelength = np.mean(\n filter_table[0][filter_table[1] > 0]\n )\n\n base.add_filter(new_filter)", "def loadFile(filterExt):\n basicFilter = \"*.\" + filterExt\n filePath = fileDialog2(fileFilter=basicFilter, dialogStyle=2, fm=1)\n if(filePath != None):\n #openfile = open('/Users/camtton/Desktop/drawing.svg', 'r')\n tokens = getSVGpath(filePath[0])\n return tokens\n else:\n print 'Please select a %s file'%(filterExt)", "def loadData(name):\n inputs = []\n outputs = []\n with open(name) as file:\n data = file.readlines()[2:]\n lines = map(str.split, data)\n for line in lines:\n inputs.append(preparePatterns(line[:-1]))\n outputs.append(float(line[-1]))\n length = len(inputs[0])\n return inputs, outputs, length", "def ascii_to_filter(filename, filter_name=None, detector=None, temperature=None, \n filter_type=None, wcol=0, tcol=None, **kwargs):\n strg = \"Reading a MiriFilter model from an ASCII file \"\n strg += \"is not longer supported.\"\n raise NotImplementedError(strg)", "def load_data(self, f): \n self.sampling = True\n self.reads = np.load(f)\n self.total = self.reads.shape[0]", "def _load_filter(self, *args, **kwargs):\n raise NotImplementedError", "def load_filter_file(self, filter_path):\n logger.debug(\"Adding filter file {}\", filter_path)\n try:\n with open(filter_path, \"r\") as filter_file:\n try:\n json_filter_data = json.load(filter_file)\n except Exception as err:\n msg = \"Unable to parse filter file {} as a json file. {!r}\".format(\n filter_path, err)\n logger.debug(msg)\n raise errors.ParserError(msg)\n except IOError:\n raise errors.ParserError(\n \"Unable to access filter path '{}'\".format(filter_path))\n\n if \"version\" not in json_filter_data:\n raise errors.ParserError(\n \"Loading filter-file {} failed. Missing 'version' key.\".format(\n filter_path))\n\n if \"filters\" not in json_filter_data:\n raise errors.ParserError(\n \"Loading filter-file {} failed. Missing 'filters' key.\".format(\n filter_path))\n\n if not isinstance(json_filter_data[\"version\"], dict):\n raise errors.ParserError(\n \"Loading filter-file {} failed. \"\n \"Expecting value of 'version' entry to be a dictionary \"\n \"but instead its a {}.\".format(filter_path,\n type(json_filter_data[\"version\"])))\n\n version_info = json_filter_data[\"version\"]\n\n if \"major\" not in version_info:\n raise errors.ParserError(\n \"Loading filter-file {} failed. \"\n \"Missing 'major' key in 'version' value.\".format(filter_path))\n\n if \"minor\" not in version_info:\n raise errors.ParserError(\n \"Loading filter-file {} failed. \"\n \"Missing 'minor' key in 'version' value.\".format(filter_path))\n\n if not isinstance(version_info[\"major\"], int):\n raise errors.ParserError(\n \"Loading filter-file {} failed. \"\n \"Expecting int for major version found {} instead.\".format(\n filter_path, type(version_info[\"major\"])))\n\n if not isinstance(version_info[\"minor\"], int):\n raise errors.ParserError(\n \"Loading filter-file {} failed. 
\"\n \"Expecting int for minor version found {} instead.\".format(\n filter_path, type(version_info[\"minor\"])))\n\n if version_info[\"major\"] != FILTER_JSON_FORMAT_MAJOR_VERSION:\n raise errors.ParserError(\n \"Loading filter-file {} failed. \"\n \"Found unexpected major version in JSON filter file.\".format(\n filter_path))\n\n self._add_filters(json_filter_data[\"filters\"], filter_path)", "def load_filter_evaluation(db_path):\n engine = create_engine('sqlite:///' + db_path)\n return pd.read_sql_table(TmFilterEval.__tablename__, engine)", "def read_filter_file(self, path, fmt = \"ascii\",\n names = (\"wavelength\", \"throughput\"),\n wavelength_u = u.angstrom, verbose = False):\n if utils.check_file_path(os.path.abspath(path), verbose = verbose):\n self.data = Table.read(path, format = fmt, names = names)\n self.wavelength = self.data[\"wavelength\"] * wavelength_u\n if verbose: print(\"1\", np.nanmax(self.wavelength))\n self.wavelength = self.wavelength.to(u.angstrom)\n self.throughput = self.data[\"throughput\"]\n if verbose: print(\"2\", np.nanmax(self.wavelength))\n\n self.wavelength_u = self.wavelength.to(wavelength_u)\n self._filter_file_path = path\n if verbose: print(\"3\", np.nanmax(self.wavelength))\n\n filename = path.split('/')[-1]\n filename_no_extension = filename.split('.')[0]\n self.filter_name = filename_no_extension\n if verbose: print(\"4\", np.nanmax(self.wavelength))\n\n self.set_plot_colour(verbose = verbose)\n if verbose: print(\"5\", np.nanmax(self.wavelength))\n self.calculate_effective_wavelength()\n if verbose: print(\"6\", np.nanmax(self.wavelength))\n self.calculate_edges()\n if verbose: print(\"7\", np.nanmax(self.wavelength))\n self.get_zeropoint()\n if verbose: print(\"8\", np.nanmax(self.wavelength))\n\n else:\n warnings.warn(\"Foo\")", "def load_vgg(file):\n vgg_layers = scipy.io.loadmat(file)['layers'][0]\n filters = {}\n for k in range(len(vgg_layers)):\n if vgg_layers[k][0][0][1][0] == 'conv':\n weights = np.array(vgg_layers[k][0][0][2][0][0])\n biases = np.reshape(vgg_layers[k][0][0][2][0][1], -1)\n filters['layer_{}'.format(k+1)] = [weights, biases]\n else:\n filters['layer_{}'.format(k+1)] = []\n return filters", "def LoadSourceFilter(coverable_file_name):\n \n with open(coverable_file_name, \"r\") as cov_file:\n file_list = [line.strip() for line in cov_file.readlines()]\n return SourceFilter(file_list)", "def fromfile(cls, f):\n raise NotImplementedError(\"ScalableRedisLocalBloomFilter not support fromfile\")", "def get_filters(filepath):\n filters = {}\n with open(filepath, \"r\") as f:\n reader = csv.DictReader(f, delimiter=';')\n for row in reader:\n filter_id = row[\"Filter Column\"]\n filters.setdefault(filter_id, {})\n filters[filter_id][\"results\"] = row[\"Result\"].split(\", \")\n filters[filter_id][\"type\"] = row[\"Type\"]\n filters[filter_id][\"description\"] = ''.join(row[\"Description\"])\n return filters", "def load_all_filters(self, interp=True, lamb=None):\n with self as s:\n filters = [s._load_filter(fname, interp=interp, lamb=lamb)\n for fname in s.content]\n return(filters)", "def _load_eval(self, eval_path):\n with open(eval_path, 'r') as fb:\n images = list()\n setmap = {'0': set(), '1': set(), '2': set()}\n for line in fb.readlines():\n image, tag = line.split()\n setmap[tag].add(image)\n images.append(image)\n return images, setmap['0'], setmap['1'], setmap['2']", "def loadFilterFromString(spec):\n return _loadPluginFromString(spec, \"ufo2ft.filters\", isValidFilter)", "def load_dat(file_name):\n data = 
loadmat(file_name)\n y = data['y']\n X = data['X']\n return X,y", "def _dataset(filename, filter, img_count=1000000):\n try:\n # Attempt to load the dataset.\n with np.load(filename) as data:\n X = data['arr_0']\n y = data['arr_1']\n except:\n # The dataset does not exist, so we regenerate.\n\n # Set up a sample of random images:\n sample_size = (img_count, 3, 3, 3) # 3x3 windows, each containing 3 channels\n images = np.random.random(sample_size)\n\n # The correct label for each \"image\" is the color at its center\n y = images[:, 1, 1, :]\n\n # Now we apply the filter to each of our images and store the filtered image\n print(\"Generating dataset:\")\n\n X = np.zeros(images.shape)\n\n for i in range(images.shape[0]):\n thisImg = images[i]\n filtered = filter.apply(thisImg)\n X[i] = filtered\n\n if (i + 1) % (img_count / 100) == 0:\n print(\"%s: %d%% done\" % (filename, 100 * (i + 1) / img_count))\n\n print(\"Dataset generation complete.\")\n\n np.savez(filename, X, y)\n\n return X[:img_count], y[:img_count]", "def _load_data(filepath):\n matrix = []\n vids = []\n with open(filepath, 'r') as fin:\n fin.readline()\n for line in fin:\n row = np.zeros(1+2+category_cnt+lang_cnt+1)\n vid, publish, duration, definition, category, detect_lang, _, _, _, _, _, re30, _ = line.rstrip().split('\\t', 12)\n vids.append(vid)\n row[0] = np.log10(int(duration))\n if definition == '0':\n row[1] = 1\n else:\n row[2] = 1\n row[3+category_dict[category]] = 1\n row[3+category_cnt+lang_dict[detect_lang]] = 1\n row[-1] = float(re30)\n matrix.append(row)\n print('>>> Finish loading file {0}!'.format(filepath))\n return matrix, vids", "def intialize_source():\n raw = loadmat(\"p300backrec2.mat\")\n channels_raw = raw['channels']\n channels = []\n for i in channels_raw[0]:\n channels.extend(list(i))\n X = raw['data']\n marker = raw['marker']\n return X,channels,marker", "def load(filename):\n lines = [l.strip('\\r\\n ') for l in open(filename, 'r').readlines()]\n lines = [l for l in lines if l != '']\n dims = [re.split(r'\\s+', l) for l in lines]\n f = np.array([[float(f) for f in d] for d in dims])\n return f", "def load_local_filters_index(cache_dir=CACHE_DIR):\n # create index from all filters present in cache (over reading from a file)\n # so that index is always in sync with what is present on disk\n local_filters_index = [os.path.splitext(os.path.relpath(path, cache_dir))[0]\n for path in glob(f\"{cache_dir}/*/*/*.vot\")]\n\n # TODO: Return a dataframe consisting of filter id and filter properties\n # (by reading params from filters' votable files)\n return local_filters_index", "def read_flow(filename):\n with open(filename, 'rb') as f:\n magic = np.fromfile(f, np.float32, count=1)\n if 202021.25 != magic:\n print('Magic number incorrect. 
Invalid .flo file')\n else:\n w = np.fromfile(f, np.int32, count=1)\n h = np.fromfile(f, np.int32, count=1)\n data = np.fromfile(f, np.float32, count=int(2*w*h))\n # Reshape data into 3D array (columns, rows, bands)\n return np.resize(data, (h[0], w[0], 2))", "def load_filters(self, names, interp=True, lamb=None, filterLib=None):\n filters = [self._load_filter(fname, interp=interp, lamb=lamb)\n for fname in names]\n return(filters)", "def __init__(self, init=None, filter_table=None, filter_name=None,\n filter_type=None, **kwargs):\n super(MiriFilter, self).__init__(init=init, **kwargs)\n\n # Data type is filter.\n self.meta.filetype = 'FILTER'\n \n # Define the filter name and type, if given\n if filter_name is not None:\n self.meta.instrument.filter = filter_name\n if filter_type is not None:\n self.meta.instrument.filter_type = filter_type\n\n if filter_table is not None:\n try:\n self.filter_table = filter_table\n except (ValueError, TypeError) as e:\n strg = \"filter_table must be a numpy record array or list of records.\"\n strg += \"\\n %s\" % str(e)\n raise TypeError(strg)\n \n # Define the wavelength units.\n# units = self.get_data_units('filter_table')\n \n # Cached arrays\n self._wavelength = None\n self._transmission = None\n self._interptransmission = None", "def read_data(self):\n self.data = reduce_spectrum(self.filename)", "def load_and_preprocess_example_file(self, file_path: tf.Tensor) -> (np.ndarray, int):\n file_path_string = file_path.numpy().decode('utf-8')\n if file_path_string.endswith('.npy'):\n lightcurve = np.load(file_path_string)\n elif file_path_string.endswith('.pkl'):\n lightcurve = pd.read_pickle(file_path_string)['flux'].values\n elif file_path_string.endswith('.feather'):\n lightcurve = pd.read_feather(file_path_string)['flux'].values\n else:\n raise ValueError(f'Unknown extension when loading data from {file_path_string}')\n lightcurve = self.preprocess_and_augment_lightcurve(lightcurve)\n return lightcurve.astype(np.float32), [self.is_positive(file_path_string)]", "def load_filters(self, filters):\n if not filters:\n return\n if not isinstance(filters, list):\n raise errors.ParserError(\"Expecting 'filters' value to be a list \"\n \"but instead its a {}.\".format(type(filters)))\n for filter_path in filters:\n if not os.path.exists(filter_path):\n raise errors.ParserError(\n \"Filter path '{}' doesn't exist\".format(filter_path))\n elif os.path.isdir(filter_path):\n self._load_filter_directory(filter_path)\n else:\n self.load_filter_file(filter_path)", "def load_filters(self, names, interp=True, lamb=None, filterLib=None):\n with self as s:\n filters = [s._load_filter(fname, interp=interp, lamb=lamb)\n for fname in names]\n return(filters)", "def load_velo_scans(velo_files):\n scan_list = []\n for filename in velo_files:\n scan = np.fromfile(filename, dtype=np.float32)\n scan_list.append(scan.reshape((-1, 5)))\n return scan_list", "def load_func(file, threshold=100, max_filter=3):\r\n imgfile=file.split('\\\\')[-1]\r\n m = imgfile.split('_')[2]\r\n ss= imgfile.split('_')[4]\r\n s = ss.split('.bmp')[0]\r\n \r\n img = Image.open(file)\r\n #\r\n npImage= np.array(Image.open(file).convert('L'))\r\n #Get brightness range \r\n px_min=np.min(npImage)\r\n px_max=np.max(npImage)\r\n #Make a Look up table to scale the image values\r\n LUT=np.zeros(256, dtype=np.uint8)\r\n LUT[px_min:px_max+1]=np.linspace(start=0,\r\n stop=255,\r\n num=(px_max-px_min)+1,\r\n endpoint=True,\r\n dtype=np.uint8)\r\n #Apply LUT and save resulting Image\r\n img = 
Image.fromarray(LUT[npImage])\r\n #find edges\r\n img= img.filter(ImageFilter.FIND_EDGES)\r\n #binarize\r\n def pixelProc(intensity):\r\n \"\"\"\r\n simple threshold function \r\n \"\"\"\r\n th=threshold\r\n if intensity > th:\r\n return 255\r\n else:\r\n return 0\r\n \r\n img = img.split()[0].point(pixelProc)\r\n \r\n #dilation\r\n img= img.filter(ImageFilter.MaxFilter(max_filter))\r\n \r\n img_array=np.array(img)\r\n #activate to see the image after treatmen\r\n #img.show()\r\n \r\n return img_array, m, s", "def load_all_filters(self, interp=True, lamb=None):\n raise NotImplementedError", "def __init__(self, source, parameter='', file_path=None):\n super().__init__() \n self.filter_type = 'data'\n self.source = source\n self.parameter = parameter\n self._initate_filter_items()\n if file_path:\n self.load_filter_file(file_path)", "def loadFNIRS(self,filepath):\r\n self.tree = ET.parse(filepath)\r\n self.data = self.tree.getroot().find(\"data\")\r\n self.samplerate = float(self.tree.getroot().find('device').find('samplerate').text)\r\n self.sensors = [i.text for i in self.tree.getroot().find('columns')]\r\n self.sensorMask = [True]*len(self.sensors)\r\n self.measurements = len(self.tree.getroot().find('data'))", "def load_data(filename):\n data = []\n with open('data/' + filename) as raw_data:\n for line in raw_data.readlines():\n data.append(float(line.strip('\\n')))\n return data\n # data = np.mat(np.genfromtxt('data/' + filename)).T\n # return data", "def load_matrix_from_raw_samples(samples_dir, scaling_factor=100):\n if not os.path.isdir(os.path.abspath(samples_dir)):\n print(\"Directory not found\")\n return None\n\n sample_files = sorted(glob.glob(os.path.join(os.path.abspath(samples_dir), \"*.32fc\")))\n\n if not sample_files:\n print(f\"No raw samples found in {samples_dir}\")\n return None\n\n all_antenna_inputs = []\n for samp_file in sample_files:\n\n with open(samp_file, \"rb\") as rf:\n data = load_bytes_from_fd(rf)\n\n data = [scaling_factor * complex(data[i], data[i + 1]) for i in range(0, len(data), 2)]\n all_antenna_inputs.append(data)\n\n# all_antenna_inputs = [[\"{rp} {ip}\".format(rp=x.real, ip=x.imag) for x in y] for y in all_antenna_inputs]\n\n return all_antenna_inputs", "def __get_data(self, filters):\n if not os.path.exists(CACHE_FILE):\n raise DataNotScrappedError()\n df = pd.read_csv(CACHE_FILE)\n if not filters:\n return list(df.T.to_dict().values())\n\n filtered_df = df[df['name'] == filters][['category', 'name']]\n\n return list(filtered_df.T.to_dict().values())", "def load_all_filters(self, interp=True, lamb=None):\n return [self._load_filter(k, interp=interp, lamb=lamb)\n for k in self.content]", "def read_file(path_file):\n with open(path_file, 'r') as f:\n L = f.readlines()\n if len(L[0]) == 9:\n #Y file\n matrix = np.zeros(len(L)-1)\n for index, l in enumerate(L):\n if index > 0:\n matrix[index-1] = 2*int(l.split(',')[1])-1\n elif len(L[0]) == 7:\n #X file\n matrix = np.chararray((len(L)-1,100))\n for index, l in enumerate(L):\n if index > 0:\n matrix[index-1,:] = list(l.split(',')[1][:-2])\n elif len(L[0]) > 100:\n #X_mat100 file\n matrix = np.zeros((len(L),100))\n for index, l in enumerate(L):\n matrix[index, :] = list(map(float, l.split(\" \")))\n else:\n assert('ERROR')\n return(matrix)", "def filter(self, data):\n self.data = pysap.Image(data=self.flt.filter(data))", "def load(fname):\r\n try:\r\n with open(fname, 'r') as f:\r\n V, H = (int(a) for a in next(f).split())\r\n W, i2w, w2i = np.zeros((V, H)), [], {}\r\n for i, line in enumerate(f):\r\n 
parts = line.split()\r\n word = parts[0].strip()\r\n w2i[word] = i\r\n W[i] = list(map(float, parts[1:]))\r\n i2w.append(word)\r\n return W, i2w, w2i, V, H\r\n except:\r\n print(\"Error: failing to load the model to the file\")", "def load_field(self, filename,unmask=True,timeslice=None,fieldname=None,\n check_for_grid_info=False,grid_info=None,grid_type='HD',\n **grid_kwargs):\n\n print(\"Reading input from {0}\".format(filename))\n grid = gd.makeGrid(grid_type,**grid_kwargs)\n return np.loadtxt(filename,np.float64).reshape(grid.get_grid_dimensions())", "def datread(file=None, header=0):\n with open(file, 'r') as fr:\n op = np.array([list(map(float, l.split())) for l in fr.readlines()[header:]])\n return op", "def _load(self):\n\n # number of non-data header details at top of data file\n header = 1\n\n # open file\n weatherData = []\n with open(self.wfile) as myfile:\n if (self.lines > 0):\n weatherData = [next(myfile) for x in xrange(self.lines + header)]\n else:\n weatherData = myfile.readlines()\n\n # get data stream from first line\n streamHeader = weatherData.pop(0).rstrip()\n if (streamHeader == 'FULL'):\n self.dataStream = 0\n elif (streamHeader == 'ADVANCED'):\n self.dataStream = 1\n elif (streamHeader == 'BASIC'):\n self.dataStream = 2\n else:\n print \"Error: unecognised data stream from file %s\" % (self.wfile)\n return -1\n\n # read data\n inputData = []\n for line in weatherData:\n entries = line.split()\n inputData.append(entries)\n\n # copy all into np array\n self.data = np.array(inputData)\n\n return 0", "def get_special_filters(filepath):\n filters = {}\n with open(filepath, \"r\") as f:\n reader = csv.DictReader(f, delimiter=';')\n for row in reader:\n function = row[\"Function\"]\n filters.setdefault(function, {})\n filters[function][\"description\"] = row[\"Description\"]\n filters[function][\"parameters\"] = row[\"Parameters\"].split(\",\")\n filters[function][\"example\"] = row[\"Example\"]\n return filters", "def load_gred_dat(self, wannier_txt_file, index_word = \"WANNIER FUNCTIONS - LIST OF ACTIVE BANDS\", permutation = None):\n f = open(wannier_txt_file, \"r\")\n F = f.read()\n f.close()\n F = os.linesep.join([s for s in F.splitlines() if s]) #remove empty lines\n F = F.split(index_word)[1].split(\"WANNIER\")[0].split(\"G = \")\n \n bands = np.array([literal_eval(i) for i in F[0].split()])-1 # indexing begins at 0\n\n for i in np.arange(1,len(F[1:])+1):\n # Reading block index vector\n \n \n G = -1*np.array([literal_eval(j) for j in F[i].split(\")\")[0].split(\"(\")[1].split()])\n\n gmap = self.mapping[self._c2i(G)]\n \n # parse block\n \n B = F[i].split(\")\")[1]\n \n # read elements in block\n\n for line in B.split(\"\\n\")[1:]:\n # note : Crystal is column-major (fortran)\n row_list = [literal_eval(j) for j in line.split()]\n if len(row_list)!=0:\n if len(row_list)==1:\n # row_list contains index\n columns = np.array(row_list) -1\n else:\n if type(row_list[1]) is int:\n # line contains indices\n columns = np.array(row_list) -1\n \n else:\n # line contains elements\n row = row_list[0] - 1\n elements = np.array(row_list[1:]) \n \n self.blocks[ gmap ][row, columns + bands[0]] = elements #row and column \n return bands", "def load_phot_from_files(self, path = False, snname = False, prefix = 'SN',\n file_type = '.dat', names = ('MJD', 'flux', 'flux_err', 'filter'),\n format = 'ascii', filter_file_type = '.dat', verbose = False):\n\n if snname:\n if not path:\n path = self._default_data_dir_path\n ## Find matching photometry\n phot_list = find_filter_phot(path = 
path, snname = snname, prefix = prefix,\n file_type = file_type, verbose = verbose)\n\n full_phot_table = Table()\n\n ## Loop over files (shouldn't be that many really)\n if len(phot_list) > 0:\n\n for phot_file in phot_list:\n\n if verbose: print(phot_file)\n phot_table = Table.read(phot_file, names = names, format = format)\n\n ## NOTE astropy vstack does not support mixin columns http://docs.astropy.org/en/stable/table/mixin_columns.html\n # This means I might have problems joining the tables together if I don't add together as I go along.\n\n full_phot_table = vstack([full_phot_table, phot_table])\n\n filter_string = functions.get_filter_from_filename(phot_file, snname, file_type)\n phot_table.meta = {\"filename\" : phot_file,\n \"filter\" : filter_string,\n \"filter_filename\": filter_string + filter_file_type}\n\n ## Sort out units\n phot_table.sort(\"MJD\")\n phot_table[\"t\"] = Time(phot_table[\"MJD\"], format = 'mjd')\n\n phot_table[\"MJD\"].unit = u.day\n phot_table[\"flux\"].unit = u.cgs.erg / u.si.angstrom / u.si.cm ** 2 / u.si.s\n phot_table[\"flux_err\"].unit = phot_table[\"flux\"].unit\n\n ## Put in dictionary - use filter from the file\n filter_key = np.unique(phot_table[\"filter\"])[0]\n if verbose: print(len(np.unique(phot_table[\"filter\"])) , phot_table.meta[\"filter\"], filter_key)\n\n if len(np.unique(phot_table[\"filter\"])) > 1 or filter_key != phot_table.meta[\"filter\"]:\n raise errors.FilterMismatchError(\"There is a mismatch between the filter filename and that in the \"\n + \"photometry file\")\n\n self.data[filter_key] = phot_table\n\n path_to_filter = os.path.join(self.filter_directory, phot_table.meta['filter_filename'])\n self.data_filters[filter_key] = functions.load_filter(path_to_filter)\n\n\n ## NOTE doing it this way because vstack doesn't like mixin columns (see above comment)\n full_phot_table.sort(\"MJD\")\n # full_phot_table[\"t\"] = Time(full_phot_table[\"MJD\"], format = 'mjd')\n full_phot_table[\"MJD\"].unit = u.day\n\n full_phot_table[\"flux\"].unit = u.cgs.erg / u.si.angstrom / u.si.cm ** 2 / u.si.s\n full_phot_table[\"flux_err\"].unit = full_phot_table[\"flux\"].unit\n\n self.phot = full_phot_table\n\n ## Sort the OrderedDict\n self._sort_phot()\n else:\n warnings.warn(\"Couldn't find any photometry\")\n else:\n warnings.warn(\"Provide a SN name\")\n\n pass", "def load_input(file_name, elements):\n\n input_file = open(file_name)\n input_data = []\n\n while True:\n chunk = input_file.readline()\n\n if(chunk == ''):\n break\n \n ret = load_chunk(chunk, elements)\n\n # Convert data to frequency domain using fft()\n input_data.append([i.real for i in fft(ret)])\n\n return input_data", "def loadIRcsv(f):\n import numpy as np\n from StringIO import StringIO\n frame = np.zeros((240,320),dtype=float)\n for i in range(240):\n l=f.readline()\n s=StringIO(l.strip('\\r\\n'))\n if s:\n frame[i]=np.genfromtxt(s,delimiter=',')\n else:\n print('Did not load the line.\\n')\n return 0\n return frame#[35:195,80:260] ", "def _read_datafile(self,path):\n \tlabels, images = [], []\n \twith gzip.GzipFile(path) as f:\n \t for line in f:\n \t vals = line.strip().split()\n \t labels.append(float(vals[0]))\n \t images.append([float(val) for val in vals[1:]])\n \tlabels = np.array(labels, dtype=np.int32)\n \tlabels[labels == 10] = 0 # fix weird 0 labels\n \timages = np.array(images, dtype=np.float32).reshape(-1, 16, 16, 1)\n \timages = (images + 1) / 2\n \treturn images, labels", "def load(self, filename):\n data = np.load(temp_dir + '/' + filename + '.npz')\n return 
data['chip_ids'], data['core_ids'], data['cx_ids']", "def read_prefiltered_data(self):\n logger.info(\"Reading prefiltered data from file: %s...\" % os.path.basename(self.filled_output_file))\n self.df = pd.read_csv(self.filled_output_file)", "def filter_array(image: Image, filter_id: str) -> Image:\n \n if filter_id == \"3\":\n image = three_tone(image,\"aqua\",\"blood\",\"lemon\")\n elif filter_id == \"X\":\n image = extreme_contrast(image)\n elif filter_id == \"T\":\n image = sepia_filter(image)\n elif filter_id == \"P\":\n image = posterize(image)\n elif filter_id == \"E\":\n image = detect_edges(image,15)\n elif filter_id == \"V\":\n image = flip_vertical(image)\n elif filter_id == \"H\":\n image = flip_horizontal(image)\n \n return image", "def process_results_file(f_path):\n results = pd.read_csv(f_path, sep='\\t', header=0)\n keep_cols = {'GS', 'SIZE', 'ES', 'NES', 'p-val'}\n results = results[:20].filter(keep_cols)\n return results", "def load(filename):\n arr = np.loadtxt(filename)\n mfs = [FramesMatch(*e) for e in arr]\n return FramesMatches(mfs)", "def load_pr(fname):\n try:\n data = np.loadtxt(fname, delimiter=',', dtype=np.float64, ndmin=2)\n except (ValueError, IndexError):\n # Try using whitespace delim (default).\n data = np.loadtxt(fname, delimiter=None, dtype=np.float64, ndmin=2)\n # If category is not -1, then filter by pedestrian.\n _, num_cols = data.shape\n if CATEGORY_COLUMN < num_cols and not np.all(data[:, CATEGORY_COLUMN] == -1):\n data = data[data[:, CATEGORY_COLUMN] == POSITIVE_CATEGORY, :]\n return data", "def load_data(self):\n return numpy.fromfile(self.data_fname, dtype=numpy.float32)", "def inithr(_filename):\n # Open file provided\n _file = open(_filename)\n # Create empty array to hold data\n _data = np.zeros((1, 3), dtype=float)\n\n # Iterate through the file line by line\n for _line in _file:\n # Split each line into constituent values\n _x = _line.split()\n # Append data array with each value, converted to float, convert parallax angle to distance\n _data = np.append(_data, np.array([float(_x[1]), float(_x[2]), (1 / float(_x[3]))], ndmin=2), axis=0)\n\n # Iterate through data array\n for _row in _data:\n np.seterr(divide='ignore')\n # Convert magnitude to luminosity\n _row[0] = _row[0] - 5 * (np.log10(_row[2]) - 1)\n # Convert B-V colour to temperature\n _row[1] = 4600 * ((1 / (0.92 * _row[1] + 1.7)) + 1 / (0.92 * _row[1] + 0.62))\n\n # Delete first empty row\n _data = np.delete(_data, 0, axis=0)\n\n # Return parsed data\n return _data", "def readData():\n fileName = sys.argv[1]\n inputArray = []\n with open(fileName) as csvFile:\n reader = csv.reader(csvFile)\n arraySlice = []\n for row in reader:\n arraySlice = (row[235:587])\n if arraySlice[0] != \"\":\n arraySlice = [float(i) for i in arraySlice]\n inputArray.append(arraySlice)\n csvFile.close()\n return inputArray", "def parse_data(filepath):\n settings = dict()\n intensity = list()\n # Boolean flags to check when to start/stop\n # reading parameters\n read_params = False\n read_int = False\n read_zeeman = False\n finished = False\n fieldoff_intensities = list()\n fieldon_intensities = list()\n with open(filepath) as read_file:\n for line in read_file:\n if \"*****\" in line:\n read_int = False\n if finished is True:\n break\n if \"Scan\" in line:\n if \"[Field ON]\" in line:\n read_zeeman = True\n scan_details = line.split()\n settings[\"ID\"] = int(scan_details[1])\n # settings[\"Date\"] = str(scan_details[4])\n read_params = True\n read_int = False\n continue\n if read_int is True:\n if 
read_zeeman is False:\n fieldoff_intensities += [float(value) for value in line.split()]\n else:\n fieldon_intensities += [float(value) for value in line.split()]\n finished = True\n if read_params is True and len(line.split()) > 1:\n # Read in the frequency step, frequency, and other info\n # needed to reconstruct the frequency data\n scan_params = line.split()\n shift = 1\n settings[\"Frequency\"] = float(scan_params[0])\n settings[\"Frequency step\"] = float(scan_params[1])\n if len(scan_params) == 4:\n settings[\"Multiplier\"] = 1.\n shift = 0\n # If the multiplier data is there, we don't shift the read\n # index over by one\n else:\n settings[\"Multiplier\"] = float(scan_params[2])\n settings[\"Center\"] = float(scan_params[2 + shift])\n settings[\"Points\"] = int(scan_params[3 + shift])\n read_params = False\n # Start reading intensities immediately afterwards\n read_int = True\n continue\n fieldoff_intensities = np.array(fieldoff_intensities)\n fieldon_intensities = np.array(fieldon_intensities)\n\n # Generate the frequency grid\n settings[\"Frequency step\"] = settings[\"Frequency step\"] * settings[\"Multiplier\"]\n # This calculates the length of either side\n side_length = settings[\"Frequency step\"] * (settings[\"Points\"] // 2)\n start_freq = settings[\"Frequency\"] - side_length\n end_freq = settings[\"Frequency\"] + side_length\n frequency = np.linspace(start_freq, end_freq, settings[\"Points\"])\n\n return frequency, fieldoff_intensities, fieldon_intensities, settings", "def read_inputs(self):\n #inputs \n inputs = {}\n # read inputs\n c = 1\n with open(self.path) as f:\n lines = f.readlines()\n for line in lines:\n data = line.rstrip(os.linesep).rstrip(',').split(',')\n input = np.array([np.float64(i) for i in data])\n inputs['image'+str(c)] = input\n c += 1\n\n\n\n return inputs", "def load(filename):\n return sio.loadmat(filename, appendmat=False, squeeze_me=True)['data']", "def read_from_file(jjjj, N, path='/data/users/mklymenko/science/H2_100/programing/dis/v0/'):\n\n p1 = np.loadtxt(path+'ff_'+str(jjjj)+'.dat')\n\n a = []\n a = np.where(\n ((p1[:, 1] == 111) & (p1[:, 2] == 111) & (p1[:, 3] == 111)))[0]\n\n n1 = N\n X = np.array(p1[a[n1]+1:a[n1+1], 0])\n Y = np.array(p1[a[n1]+1:a[n1+1], 1])\n Z = np.array(p1[a[n1]+1:a[n1+1], 2])\n F = np.array(p1[a[n1]+1:a[n1+1], 3])\n\n return (np.vstack((X, Y, Z)), F)", "def _load_fmri(fmri_filenames):\n a = np.array([pd.read_csv(subject_filename,\n header=None).values\n for subject_filename in fmri_filenames])\n Z = np.zeros((500, a[0].shape[1]))\n\n for i in range(len(a)):\n Z[:a[i].shape[0], :] = a[i]\n a[i] = Z[:TRUNC, ]\n Z = 0*Z\n\n return a", "def load_rf_data(filename):\n A = np.loadtxt(filename, dtype=\"float32\", delimiter=\",\")\n\n X = A[:, :10]\n y = A[:, -1]\n\n return X, y", "def load(src_path):\n satdat = rasterio.open(src_path)\n return satdat", "def loadData():\n datfile = glob.glob(DATA_PATH + 'consolidated.npy')\n return np.load(datfile[0])", "def loadData(infile,k):\n f = open(infile,'r')\n #f = f.read().split(\"\\n\")\n #raw = json.loads(f[1])\n f = f.read()\n raw = json.loads(f)\n data = np.array(raw)\n dataset = data[k]\n return dataset", "def load_data():\n x = np.genfromtxt(X_FILE, usecols=(0, 1))\n y = np.genfromtxt(Y_FILE, usecols=(0))\n\n return x, y", "def get_flt_info(files=[], columns=['FILE', 'FILTER', 'INSTRUME', 'DETECTOR', 'TARGNAME', 'DATE-OBS', 'TIME-OBS', 'EXPSTART', 'EXPTIME', 'PA_V3', 'RA_TARG', 'DEC_TARG', 'POSTARG1', 'POSTARG2']):\n import astropy.io.fits as pyfits\n from astropy.table 
import Table\n \n if not files:\n files=glob.glob('*flt.fits')\n \n N = len(files)\n \n data = []\n\n for i in range(N):\n line = [os.path.basename(files[i]).split('.gz')[0]]\n if files[i].endswith('.gz'):\n im = pyfits.open(files[i])\n h = im[0].header\n else:\n h = pyfits.Header().fromfile(files[i])\n \n filt = get_hst_filter(h)\n line.append(filt)\n has_columns = ['FILE', 'FILTER']\n for key in columns[2:]:\n if key in h:\n line.append(h[key])\n has_columns.append(key)\n else:\n continue\n \n data.append(line)\n \n tab = Table(rows=data, names=has_columns)\n return tab", "def load(filename):\n return np.load(filename)", "def read_2d_analysis_data(f):\n \n data = np.transpose(np.loadtxt(f, dtype=np.float64))\n x = data[0]\n y = data[1]\n\n return x, y", "def test_grdfilter_dataarray_in_file_out(grid):\n with GMTTempFile(suffix=\".nc\") as tmpfile:\n result = grdfilter(grid, outgrid=tmpfile.name, filter=\"g600\", distance=\"4\")\n assert result is None # grdfilter returns None if output to a file\n result = grdinfo(tmpfile.name, per_column=True)\n assert (\n result == \"-180 180 -90 90 -6147.49072266 5164.06005859 1 1 360 180 1 1\\n\"\n )", "def load_matrix(self, src_dir, key_word=\"funneled\"):\r\n X = []\r\n Y = []\r\n label = 0\r\n for root, dirs, files in os.walk(src_dir):\r\n if files != []:\r\n for file in files:\r\n if key_word in file:\r\n img = cv2.imread(os.path.join(root, file), cv2.IMREAD_GRAYSCALE)\r\n min_value = np.min(img)\r\n max_value = np.max(img)\r\n X.append((img.flatten() - min_value)/(max_value - min_value)) # Normalize the data to [0, 1]\r\n Y.append(label)\r\n label +=1\r\n \r\n return dict(X = np.asarray(X), \r\n Y = np.asarray(Y))", "def _initialize_data_filter(self):\n df_params = self._loading_params.copy()\n df_params[\"filter_negate\"] = True\n df_params[\"filter_upper\"] = True\n self._data_filter = LoadProcessedData(**df_params)", "def test_filter_samples_from_distance_matrix_file_input(self):\r\n actual = filter_samples_from_distance_matrix(self.input_dm1,\r\n [\"GHI blah\", \"XYZ\"])\r\n self.assertEqual(actual, expected_dm1a)\r\n actual = filter_samples_from_distance_matrix(self.input_dm1,\r\n [\"GHI\", \"DEF\"])\r\n self.assertEqual(actual, expected_dm1b)", "def load_scans_filter(img_org, filterdata):\n\n # check which filter will be used and apply that one\n filter = filterdata['filtername']\n if filter == 'gaussian':\n sigma = filterdata['parameters'][0]\n smoothed_img = calc_gaussian(img_org, sigma=sigma)\n elif filter == 'median':\n radius = filterdata['parameters'][0]\n smoothed_img = calc_median(img_org, radius=radius)\n elif filter == 'curvatureflow':\n iter = filterdata['parameters'][0]\n timestep = filterdata['parameters'][1]\n smoothed_img = calc_curvatureflow(img_org, iteration=iter, step=timestep)\n elif filter == 'anisodiff':\n iter = filterdata['parameters'][0]\n timestep = filterdata['parameters'][1]\n conductance = filterdata['parameters'][2]\n smoothed_img = calc_anisodiff(img_org, iteration=iter, step=timestep, conductance=conductance)\n else:\n print('The filtername does not exist.')\n\n return smoothed_img", "def read_flt_file(filename):\n\n fid = open(filename,'rb')\n arr = array.array('i')\n arr.fromfile(fid, 1) # dim\n dim = arr[0]\n #http://www.python.org/search/hypermail/python-1993/0393.html\n if dim>100:\n \"\"\"print 'Read very high dimension (>100).'\n print 'Endianness may come into play.'\n print 'Try to swap the byte order.'\"\"\"\n swap = True;\n arr.byteswap()\n dim = arr[0]\n #print 'dim =',dim\n else:\n swap = 
False\n assert(dim>=1 and dim<=4) # only accept data up to 4 dimensions.\n\n arr = array.array('i')\n arr.fromfile(fid,dim+2)\n if swap:\n arr.byteswap()\n volume = reduce(lambda x,y: x*y, arr[0:dim-1], 1)\n\n binvalues = array.array('f')\n binvalues.read(fid, volume*arr[dim-1])\n if swap:\n binvalues.byteswap()\n fid.close()\n\n data = numpy.array(binvalues, numpy.float)\n data = numpy.reshape(data, (arr[dim-1], volume))\n\n return (arr[:dim],data)", "def _read_samples(self):\n\n logging.debug(\"Start file parsing.\")\n data = pd.read_csv(self._source_file, header=None)\n \n data = pd.read_csv(self._source_file, header=None)\n header = pd.read_csv(self._header_file, delimiter=':', skiprows=1, header=None)\n header.columns = ['column', 'column_type']\n\n data.columns = header.column.tolist() + ['attack']\n data['attack'] = data['attack'].str.replace('.', '')\n data['label'] = 1\n data.loc[data['attack'] == 'normal', 'label'] = 0\n\n symbolic_columns = header.loc[header.column_type == ' symbolic.'].column.tolist()\n # print(symbolic_columns)\n\n for scol in symbolic_columns:\n data[scol] = pd.Categorical(data[scol])\n one_hot_cols = pd.get_dummies(data[scol], prefix=scol)\n data = pd.concat([data, one_hot_cols], axis=1)\n\n data = data.drop(columns=symbolic_columns)\n data = data.drop(columns=['attack'])\n\n # data.loc[data.attack != 'normal' , ['attack', 'label']].head(20)\n\n data_normal = data.loc[data['label'] == 0]\n data_abnormal = data.loc[data['label'] == 1]\n\n data_normal_train = data_normal.sample(frac=0.7)\n data_normal_test = data_normal.loc[~data_normal.index.isin(data_normal_train.index)]\n\n data_normal_train = data_normal_train.drop(columns=['label']).values\n data_normal_test = data_normal_test.drop(columns=['label']).values\n data_abnormal = data_abnormal.drop(columns=['label']).values\n \n scaler = MinMaxScaler()\n _ = scaler.fit(data_normal_train)\n data_normal_train = scaler.transform(data_normal_train)\n data_normal_test = scaler.transform(data_normal_test)\n data_abnormal = scaler.transform(data_abnormal)\n \n logging.debug('Normal {}; Train {}; Test{}'.format(data_normal.shape, data_normal_train.shape, data_normal_test.shape))\n logging.debug('Abnormal {}'.format(data_abnormal.shape))\n\n samples = {}\n samples['NORMAL'] = data_normal_train\n samples['NORMAL_TEST'] = data_normal_test\n samples['ABNORMAL_TEST'] = data_abnormal\n\n logging.debug(\"End file parsing.\")\n\n return samples", "def load_file(file_name) -> np.ndarray:\r\n reader = csv.reader(open(file_name, \"r\"), delimiter=',')\r\n x_rdr = list(reader)\r\n return np.array(x_rdr).astype('float')", "def get_filtered_data(filtered_data_file_name=\"filtered_module_data.p\", category='entities_visual_module',\n load_entities=True):\n\n filemanager = FilesManager()\n\n # Load the filtered file\n filtered_module_data = filemanager.load_file(\n \"{0}.{1}.{2}\".format(DATA, VISUAL_GENOME, get_name_from_file(filtered_data_file_name)))\n\n if load_entities:\n entities = list(filtered_module_data[category])\n else:\n entities = None\n\n hierarchy_mapping_objects = dict(filtered_module_data['object_ids'])\n hierarchy_mapping_predicates = dict(filtered_module_data['predicate_ids'])\n\n # Delete the whole data, is no longer needed\n del filtered_module_data\n return entities, hierarchy_mapping_objects, hierarchy_mapping_predicates", "def filter_w2v_nomenclature(w2vfilename,filter_nomenclature):\n V = set(filter_nomenclature)\n wordlist = []\n veclist = [] \n istream = open(w2vfilename)\n istream.readline() #skips w2v 
header\n for line in istream:\n fields = line.split()\n word = fields[0]\n if word in V:\n vec = np.array([float(elt) for elt in fields[1:]])\n wordlist.append(word)\n veclist.append(vec)\n istream.close()\n M = np.array(veclist)\n return (wordlist,M)", "def load_matches(file):\r\n mat = np.loadtxt(file, dtype=int)\r\n ret_lst = []\r\n for row in range(mat.shape[0]):\r\n ret_lst.append(((mat[row, 0], mat[row, 1]), (mat[row, 2], mat[row, 3])))\r\n return ret_lst", "def loadFilters(ufo):\n preFilters, postFilters = [], []\n for filterDict in ufo.lib.get(FILTERS_KEY, []):\n namespace = filterDict.get(\"namespace\", \"ufo2ft.filters\")\n try:\n filterClass = getFilterClass(filterDict[\"name\"], namespace)\n except (ImportError, AttributeError):\n from pprint import pformat\n\n logger.exception(\"Failed to load filter: %s\", pformat(filterDict))\n continue\n filterObj = filterClass(\n *filterDict.get(\"args\", []),\n include=filterDict.get(\"include\"),\n exclude=filterDict.get(\"exclude\"),\n pre=filterDict.get(\"pre\", False),\n **filterDict.get(\"kwargs\", {}),\n )\n if filterObj.pre:\n preFilters.append(filterObj)\n else:\n postFilters.append(filterObj)\n return preFilters, postFilters" ]
[ "0.66520554", "0.657589", "0.65067047", "0.64896256", "0.63868827", "0.6371127", "0.626495", "0.5997112", "0.59562635", "0.5929189", "0.59240067", "0.5835717", "0.58037555", "0.5700353", "0.56678426", "0.5623952", "0.5620478", "0.55956954", "0.5576885", "0.55100983", "0.550417", "0.55021715", "0.5485636", "0.5467415", "0.5460615", "0.54592407", "0.54575944", "0.5457457", "0.5453629", "0.54457957", "0.542522", "0.5419263", "0.5405198", "0.53813183", "0.5368278", "0.5361454", "0.5360949", "0.53438735", "0.53315884", "0.5326437", "0.531536", "0.53056896", "0.52986723", "0.52973676", "0.5294212", "0.5293464", "0.5291209", "0.5289903", "0.5257759", "0.5255535", "0.5252505", "0.5238986", "0.52218664", "0.5214181", "0.521148", "0.5207132", "0.52003145", "0.5195945", "0.5195799", "0.51952654", "0.51932365", "0.5186874", "0.51866937", "0.51854414", "0.5184214", "0.5181986", "0.5174641", "0.5173731", "0.5173458", "0.51645786", "0.5156193", "0.5155503", "0.5154256", "0.5138169", "0.51343274", "0.51280755", "0.5126139", "0.5125667", "0.51202774", "0.5110995", "0.5109484", "0.5105581", "0.5105139", "0.5102799", "0.5101441", "0.5098193", "0.5096143", "0.5095973", "0.50910884", "0.50906533", "0.50871325", "0.5084723", "0.508418", "0.5082016", "0.50757295", "0.5075678", "0.50729203", "0.5072643", "0.50523454", "0.5049961" ]
0.7438268
0
returns boolean, whether xy is occupied in filter matrix
def filtered(filter, xy):
    try:
        x, y = xy
        return bool(filter[x][y])
    except IndexError:
        return False
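A minimal usage sketch for the filtered helper above — the 2x2 sample matrix and the coordinates are made-up values for illustration, assuming the filter is a plain list of lists whose truthy entries mark occupied cells:

# Hypothetical 2x2 filter matrix; a truthy entry marks an occupied cell.
sample_filter = [[0, 1],
                 [0, 0]]

print(filtered(sample_filter, (0, 1)))  # True: cell (0, 1) is occupied
print(filtered(sample_filter, (1, 0)))  # False: cell (1, 0) is free
print(filtered(sample_filter, (5, 5)))  # False: out of range, the IndexError is swallowed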
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def xy_occupied(xy, board):\n return True if board[xy[0]][xy[1]] else False", "def occupiedNeighbor(self, xi, yi):\n\n xmax = self.mapData.og.info.width\n ymax = self.mapData.og.info.height\n\n if self.mapData.sampled:\n # Fails on an occupied cell\n assert self.mapData.mapArrayS[xi, yi] < 50\n for x in range(max(xi - 1, 0), min(xi + 1, xmax)):\n for y in range(max(yi - 1, 0), min(yi + 1, ymax)):\n if self.mapData.mapArrayS[x,y] > 50:\n return True\n return False\n else:\n # Fails on an occupied cell\n assert self.mapData.mapArray[xi, yi] < 50\n for x in range(max(xi - 1, 0), min(xi + 1, xmax)):\n for y in range(max(yi - 1, 0), min(yi + 1, ymax)):\n if self.mapData.mapArray[x,y] > 50:\n return True\n return False", "def occupied(self, (xIndex, yIndex)):\n return xIndex < 0 or yIndex < 0 or \\\n xIndex >= self.xN or yIndex >= self.yN or \\\n self.grid[xIndex][yIndex]", "def contains(self, xy):\n if np.ndim(xy) == 2:\n xp = xy[:, 0]\n yp = xy[:, 1]\n elif (np.ndim(xy) == 1) and (len(xy) == 2):\n xp = xy[0]\n yp = xy[1]\n else:\n raise ValueError(\"crazy\")\n\n xinside = (self.x0 <= xp) & (xp <= self.x1)\n yinside = (self.y0 <= yp) & (yp <= self.y1)\n return xinside & yinside", "def contains(self, x):\n # need more to assure its a real SSP - ie on right torus\n return (len(x) == self._shape[0])", "def full(self):\n for x in range(0,3):\n for y in range(0,3):\n if self[x,y] is None:\n return False\n return True", "def __contains__(self, x):\n indexes = self.get_indexes(x)\n return self.sketch[indexes] > 0", "def inside(i,j,im,h=H): #X\n return i-h >=0 and j-h >=0 and i+h+1<=im.shape[0] and j+h+1<=im.shape[1]", "def check(self,a,x,y):\r\n return not self.exitsinrow(self.rows,x,a) and not self.existsincol(self.rows,y,a) and \\\r\n not self.exitsinblock(self.rows, x - x % 3, y - y % 3,a)", "def fullGrid(state):\n return not ((state[:, :, 0] + state[:, :, 1]) == 0).any()", "def point_in_map(self, x, y):\r\n return 0 <= x < self.width and 0 <= y < self.height and (x,y) not in self.walls", "def __contains__(self, point):\n for component, dim in zip(point, self.dimensions):\n if component not in dim:\n return False\n return True", "def filter(x,y):\n if tf.reduce_sum(y) > pixels:\n return True\n else:\n return False", "def filter_tile_neighbors(self, coord):\n coord = coord.int_tuple\n if coord[1] <= self.MAX_Y and coord[0] <= self.MAX_X and coord[1] >= \\\n 0 and coord[0] >=\\\n 0 and (self.currentmap.boxAt(coord[0], coord[1])\n == 0 or self.currentmap.boxAt(coord[0], coord[1]) == 2):\n return True\n return False", "def is_at_intersection(self):\n directions = 0\n self.tile = (self.get_nearest_row(), self.get_nearest_col())\n if self.internal_map[self.tile[0] - 1][self.tile[1]] not in ('x', ):\n directions += 1\n if self.internal_map[self.tile[0] + 1][self.tile[1]] not in ('x', ):\n directions += 1\n if self.internal_map[self.tile[0]][self.tile[1] - 1] not in ('x', ):\n directions += 1\n if self.internal_map[self.tile[0]][self.tile[1] + 1] not in ('x', ):\n directions += 1\n return True if directions > 2 else False", "def __check_row(self, x: int, y: int) -> bool:\n return not any([self.__maze[x, y + i] for i in (-1, 0, 1)])", "def contains(self, coord):\n # print(coord, self.position, self.size)\n return (0 <= coord[0] - self.position[0] < self.size[0] and\n 0 <= coord[1] - self.position[1] < self.size[1])", "def __check_col(self, x: int, y: int) -> bool:\n return not any([self.__maze[x + i, y] for i in (-1, 0, 1)])", "def test(self, grid, flag):\n x = self.x+SPEED_X[flag]\n y = 
self.y+SPEED_Y[flag]\n return 0 <= x < self.n and 0 <= y < self.n and grid[y][x] == 1", "def contains(self, point):\n return 0 <= point.x <= 1 \\\n and 0 <= point.y <= 1 \\\n and 0 <= point.z <= 1", "def _is_occupied(\n grid: List[List[str]], row: int, col: int, dx: int, dy: int) -> bool:\n while 0 <= (row + dy) < len(grid) and 0 <= (col + dx) < len(grid[0]):\n row += dy\n col += dx\n if grid[row][col] == 'L':\n return False\n if grid[row][col] == '#':\n return True\n return False", "def is_visible(self, x, y) :\n\t\tres_x = (x > self.x_min) and (x < self.x_max)\n\t\t# print 'res_x : {0}, x : {1}, x_min : {2}, x_max:{3}'.format(res_x, x, self.x_min, self.x_max)\n\t\tres_y = (y > self.y_min) #and (y < self.y_max)\n\t\treturn res_x and res_y", "def __cell_is_occupied(self, x, y) -> bool:\n return self.occupancy_map.data[self.__get_cell_index(x, y)] != 0", "def solved(self):\n return all(cell == 1 for row in self.faces for cell in row) or all(cell == 0 for row in self.faces for cell in row)", "def is_occupied(self, p):\r\n return 0 <= p[0] < self.width and 0 <= p[1] < self.height and self.grid[p[1]][p[0]] == '#'", "def __cell_is_in_map(self, x, y) -> bool:\n return x >= 0 and y >= 0 and x < self.occupancy_map.info.width and y < self.occupancy_map.info.height", "def is_in_field(self, x, y):\n return (self.origin_x <= x < self.width) and (self.origin_y <= y < self.height)", "def any(self):\n boolean = True\n if type(self.idxs) == np.ndarray:\n boolean = all(self.idxs.shape)\n elif type(self.idxs) == list:\n sh = np.array(self.idxs).shape\n if len(sh) >= 2:\n boolean = np.all(sh)\n return boolean", "def _check_occupied(self, col, row):\n if self.board[row - 1][col - 1] == EMPTY:\n return False\n else:\n return True", "def visited(self, row, col):\n return (row, col) in self._visited", "def is_filled(self, x, y):\n if not (0 <= x and x < self.map_size[0]\n and 0 <= y and y < self.map_size[1]):\n return False\n\n # Is there something else than Floor?\n return (len(self.map.get_cell_nodes(x, y)) > 1)", "def IsTransparent(self,coord): \r\n x,y=coord\r\n if x<0 or x>=self.size[0] or y<0 or y>=self.size[1]: return False \r\n return self.map[x][y].transparent", "def see_occupant(self, x, y, dx, dy):\r\n if dx == 0 and dy == 0: # Makes looping easier\r\n return False\r\n x += dx\r\n y += dy\r\n while 0 <= x < self.width and 0 <= y < self.height:\r\n if self.grid[y][x] == '#':\r\n return True\r\n if self.grid[y][x] == 'L':\r\n return False\r\n x += dx\r\n y += dy\r\n return False", "def checkBox(self, x, y):\n used = []\n for i in range(3):\n for j in range(3):\n cur = self.board[x+i][y+j]\n if cur not in used:\n if cur !=0:\n used += [cur]\n else:\n return False\n return True", "def _in_bounds(self, x, y):\r\n return 0 <= x < 8 and 0 <= y < 8", "def __bool__(self):\n return self.fam.c_nonzero(self)", "def xy_off_board(xy, board):\n return False if 0 <= xy[0] < len(board) and 0 <= xy[1] < len(board) else True", "def contains(self, x):\n return (isinstance(x, int) and x >= 0 and x < self._dim)", "def isComplete(grid):\n for row in range(0,9):\n for col in range(0,9):\n if grid[row][col]==0:\n return False\n return True", "def _is_valid_land(x, y, grid):\n return (x >= 0) and (x < len(grid)) and (y >= 0) and (y < len(grid[0])) and grid[x][y]", "def tileOccupied(self, i, j):\n if self.tiles[i][j] == 1 or i == 0 or i == self.size[0] - 1 or j == 0 or j == self.size[1] - 1:\n return True\n for prop in self.props:\n if prop.i == i and prop.j == j:\n return True\n return False", "def __nonzero__(self):\n return 
not self.as_point == (0, 0)", "def is_valid_index(x, y, l_matrix):\n return x < l_matrix and y < l_matrix and x > -1 and y > -1", "def _has_coordinates_and_gradient(self) -> bool:\n return self._coords is not None and self._coords.g is not None", "def _is_visible(self, point):\n return point[0] > 0 and point[0] < 1 and point[1] > 0 and point[1] < 1", "def _inside(self, x, y):\n wx, wy, w, h = self._raw_graph_window_dim()\n if wx <= x < wx + w and wy <= y < wy + h:\n return True\n return False", "def is_unoccupied(self, row, col):\n return self.maze[row][col] is EMPTY", "def pixel_at(self, x, y):\n return self.arr[x, y, 1] == 255", "def check_masked (self, pos : list,) :\n count = 0\n total = 0\n for x in range(pos[0],min(pos[0] + AUTO_width1, self.m_x)) :\n for y in range(pos[1], min(pos[1] + AUTO_width1, self.m_y)) :\n total += 1\n if self.current_grid[x][y] :\n count += 1\n if count/total > 0.5 :\n return True\n else :\n return False", "def check_empty(self, coord):\n x, y, z = coord\n if self.perlin_3d(x, y, z) <= 0:\n return True\n else:\n return False", "def is_percolates(self):\n return self._uf.connected(self._top_idx, self._bottom_idx)", "def _board_is_full(self):\n return (self.get_counts()[0] + self.get_counts()[1] == self._num_rows * self._num_cols)", "def is_blank(self):\n return not any(self._1 in _row for _row in self._pixels)", "def is_contiguous(arr):\n mn, mx = min(arr), max(arr)\n s = sum(arr)\n sn = (mn*(mn-1))/2 if mn!=0 else 0\n sx = (mx*(mx+1))/2\n if s == sx-sn:\n return True\n else:\n return False", "def check_free(self, arr):\n cell_location = self.cartesian_to_cell(arr)\n cell = self.occ_matrix[cell_location[0], cell_location[1]]\n return cell == 0", "def valid(black, white, x, y):\n return (not black & gobit[(x, y)]) and (not white & gobit[(x, y)])", "def check_ext(im, i, j):\n neighb = 0\n count = 0\n for a in range(8):\n if (im[i+relpos[a][0], j+relpos[a][1]] and (count == 0)):\n count += 1\n neighb += 1\n else:\n count = 0\n return (neighb < 2)", "def active_piece_contains(self, coords):\n return coords in self.active_piece", "def _fix_uniq_col(self):\n # subgradient; for two boolean arrays, multiplication seems to be the best way \n # (equivalent to logical_and)\n n_covered_col = self.a_csr.dot(np.ones(self.ncols)) \n ifix = np.zeros(self.ncols, dtype=bool)\n if (np.count_nonzero(n_covered_col) != self.mrows):\n raise ValueError(\"There are uncovered rows! 
Please check your input!\")\n if (np.any(n_covered_col==1)):\n inonzero = self.a_csr[n_covered_col==1,:].nonzero()\n ifix[inonzero[1]] = True\n\n return ifix", "def is_valid_room(self, x, y):\r\n return 0 <= x < self.__nx and 0 <= y < self.__ny", "def _idxs_are_present(self, *args):\n return set(args).issubset(set(range(self.n_atoms)))", "def valid(self, pos):\n return self.m.shape[0] > pos[0] >= 0 and self.m.shape[1] > pos[1] >= 0 and self.m[pos] == 0", "def on_board(hexe):\n\n cube = axial_to_cube(hexe)\n\n # check each bound\n for axis in cube:\n if abs(axis) > BOARD_BOUND:\n return False\n return True", "def is_in_map(self, x_ind, y_ind):\n return not (x_ind < self.origin[0] or\n x_ind > self.origin[0] + self.n * self.resolution or\n y_ind < self.origin[1] or\n y_ind > self.origin[1] + self.n * self.resolution)", "def is_null(self) -> bool:\n for y in range(0, self.num_of_rows):\n for x in range(0, self.num_of_cols):\n if self._A[y][x] != 0:\n return False\n return True", "def is_valid(i, j, shape):\n return i >= 0 and j >= 0 and i < shape[0] and j < shape[1]", "def inBounds(self, px, py):\n return px >= 0 and py >= 0 and px < self.w and py < self.h", "def contains_origin(self):\n return self.contains(self.ambient_space().zero())", "def is_alive(self, x: int, y:int) -> bool :\n return self.table[y][x]", "def inside(self, x: int, y: int, z: int, chunk: bool=False) -> bool:\n factor = 32 if chunk else 512\n rx = x // factor\n rz = z // factor\n return not (rx != self.x or rz != self.z or y < 0 or y > 255)", "def no_neighbour(x: int, y: int) -> bool:\r\n if not wall_check(x, y-1, False):\r\n if example[x, y-1] == 0:\r\n return False\r\n if not wall_check(x, y+1, False):\r\n if example[x, y+1] == 0:\r\n return False\r\n if not wall_check(x+1, y, False):\r\n if example[x+1, y] == 0:\r\n return False\r\n if not wall_check(x-1, y, False):\r\n if example[x-1, y] == 0:\r\n return False\r\n return True", "def xy_suicide(xy, board, color):\n group = xy_to_group(xy, board)\n\n if group_adjacents(group, board, color) == group_adjacents(group, board, filter_by=\"foe\"):\n for xy_adj in xy_adjacents(xy, board, filter_by=\"foe\", color=color):\n group_adj = xy_to_group(xy_adj,board)\n if group_is_surrounded(group_adj,board):\n return False\n return True\n else:\n return False", "def test_filter_coords_matrix(self):\r\n m = array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])\r\n expected = array([[1], [4], [7], [10]])\r\n assert_almost_equal(filter_coords_matrix(m, 1), expected)\r\n expected = array([[1, 2], [4, 5], [7, 8], [10, 11]])\r\n assert_almost_equal(filter_coords_matrix(m, 2), expected)", "def is_valid(x, y):\n return (x >= 0) & (x < BOARD_SIZE) & (y >= 0) & (y < BOARD_SIZE)", "def count_neighbor_flags(self, x, y):\n\t\treturn sum(self.marks[n][m] == FLAG for (n, m) in self.get_valid_neighbors(x, y))", "def inrange(cc, point):\n return point.row in range(cc.top, cc.bottom+1) and point.col in range(cc.left, cc.right+1)", "def checkAvailable(self, x, y):\n return 0 <= x < self.rows and 0 <= y < self.cols and not self.gridBusy[x][y]", "def check_free_space(environment, target_xy, fovea):\n temp_image = check_target_position(environment, target_xy, fovea)\n if np.array_equal(temp_image, np.zeros(temp_image.shape)):\n return True\n else:\n return False", "def is_in_the_grid(self, row: int, col: int) -> bool:\n return 0 <= row < self.n_row and 0 <= col < self.n_col", "def interior_contains(self, point):\n try:\n p = vector(point)\n except TypeError: # point not iterable or no common ring for 
elements\n if len(point)>0:\n return False\n else:\n p = vector(self.field(), [])\n\n if len(p)!=self.ambient_dim():\n return False\n \n for H in self.Hrep_generator():\n if not H.interior_contains(p):\n return False\n return True", "def _check_if_position_on_board(coord: tuple, board_size: int):\n in_row = coord[0] in range(board_size)\n in_col = coord[1] in range(board_size)\n return in_row and in_col", "def checkRow(self, x):\n used = []\n for y in range(len(self.board[0])):\n cur = self.board[x][y]\n if cur not in used:\n if cur !=0:\n used += [cur]\n else:\n return False\n return True", "def satifiesWinConditions(self, coordinates):\n if self.treasureCaptured and (self.x, self.y) in coordinates:\n return True\n else:\n return False", "def check_grid(grid: List):\n for row in range(9):\n for col in range(9):\n if grid[row][col] == 0:\n return False\n return True", "def in_row(row, x, y,\n input_suduko_3d):\n value_in_row = np.in1d(np.asarray(input_suduko_3d[x, y, 0:]), np.asarray(input_suduko_3d[row, :, 0]))\n for i, value in enumerate(value_in_row):\n if value_in_row[i] == True and i != 0:\n input_suduko_3d[x, y, i] = 0", "def flags_nearby(self, y, x):\n count = 0\n l = [[ye, xe] for xe in range(\n x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0]\n for ye, xe in l:\n if xe >= self.x or ye >= self.y:\n continue\n if self.table_state[ye][xe] == Minesweeper.FLAG:\n count += 1\n return str(count)", "def valid(point):\n index = offset(point)\n if tiles[index] == 0:\n return False\n\n index = offset(point + 19)\n\n if tiles[index] == 0:\n return False\n\n return point.x % 20 == 0 or point.y % 20 == 0", "def in_bounds(self, x, y):\n return x >= 0 and x < 8 and y >= 0 and y < 8", "def contains(self, point):\n if in_range(point[0], self.xrange) and in_range(point[0], self.yrange) and in_range(point[0], self.zrange):\n return True\n return False", "def valid_coordinates(self, x, y):\n return ((x >= 0) and (x < self.width) and\n (y >= 0) and (y < self.height))", "def contains_point(self, x, y):\r\n if self.m == None:\r\n if abs(x - self.start[0]) > 0.6:\r\n return False\r\n else:\r\n if (y >= self.start[1] and y <= self.end[1]) or \\\r\n (y <= self.start[1] and y >= self.end[1]):\r\n return True\r\n else:\r\n return False\r\n else: \r\n y0 = int(self.m * x + self.n)\r\n if abs(y - y0) > 0.6: \r\n return False \r\n else: \r\n if ((x >= self.start[0] and x <= self.end[0]) or \\\r\n (x <= self.start[0] and x >= self.end[0])) and \\\r\n ((y >= self.start[1] and y <= self.end[1]) or \\\r\n (y <= self.start[1] and y >= self.end[1])): \r\n return True\r\n else:\r\n return False", "def is_occupied(self, pos):\n return any([p == pos for p in self._workers.values()])", "def _check_zero(self, h, i, j, u, v, w):\n return self._.p[u, h, i] != 0 and self._.p[v, h, j] != 0 and \\\n self._.p[w, i, j] != 0", "def healthy_test(obj: np.ndarray) -> bool:\n nb_rows, nb_cols = obj.shape\n return nb_rows == nb_cols > 1 and np.array_equal(obj, colony(nb_rows))", "def available(self, position):\n if position is not None:\n x, y = position\n return self.grid[x][y] == 0", "def visible(self):\n return -PipePair.WIDTH < self.x < WIN_WIDTH", "def wall_check(x: int, y: int, state: bool) -> bool:\r\n if state:\r\n if x == 0 or x == shape-1 or y == 0 or y == shape-1:\r\n return True\r\n else:\r\n if x < 0 or x >= shape or y < 0 or y >= shape:\r\n return True\r\n return False", "def check_2x2_solved(self):\n return self._grid[0][0] == 0 and self._grid[0][1] == 1 \\\n and self._grid[1][0] == self._width*1 and 
self._grid[1][1] == (1 + self._width * 1)", "def isInsideImage(x, y, nx, ny, imageNx, imageNy):\r\n return ( ((x+nx) < imageNx) and ((y+ny) < imageNy) )", "def is_full(self, row, col):\n self._validate_indexes(row, col)\n return self._uf.connected(self._top_idx, row * self._n + col)" ]
[ "0.6924757", "0.6860181", "0.6504782", "0.6499604", "0.6476733", "0.6406629", "0.6393374", "0.63787687", "0.6288766", "0.6284607", "0.6230571", "0.62087417", "0.6196269", "0.61808234", "0.6141441", "0.612595", "0.61152285", "0.61060596", "0.60959953", "0.60850585", "0.60841113", "0.60813856", "0.6079903", "0.6060185", "0.60549986", "0.6052309", "0.6033238", "0.60327077", "0.60222626", "0.60032696", "0.59782004", "0.59424895", "0.5932495", "0.5924623", "0.59157604", "0.5912242", "0.58995837", "0.5891721", "0.58886135", "0.5881907", "0.58801407", "0.5871019", "0.5848356", "0.5840079", "0.58268774", "0.5804603", "0.58032495", "0.5803122", "0.57995147", "0.5785336", "0.57811993", "0.57736135", "0.57717645", "0.5769341", "0.5763813", "0.5752648", "0.5748129", "0.5746166", "0.5743564", "0.57368386", "0.57352185", "0.5721411", "0.57206273", "0.5718243", "0.57116264", "0.57019824", "0.570088", "0.56967264", "0.5696094", "0.5694786", "0.56924975", "0.56912225", "0.5689741", "0.5687875", "0.56863606", "0.5677088", "0.56755716", "0.56745946", "0.5671495", "0.5666265", "0.566415", "0.56554264", "0.56522566", "0.5650468", "0.5649113", "0.56377107", "0.56291413", "0.5629055", "0.562895", "0.56276834", "0.5616735", "0.56113255", "0.560998", "0.56095445", "0.5608892", "0.5601403", "0.559985", "0.5595969", "0.55953246", "0.5594481" ]
0.71220756
0
Do all the grunt work of the snaking values, ordering the filtered blanks, and basically all the heavy lifting
def create_matrix(totals, filter, start=0, odd=False):
    i = start
    blank = ''
    x, y = 0, 0
    total_x, total_y = totals
    # matrix is represented as
    #
    # row are down
    # t-> 1 7
    #     2 9
    #     3 10
    #     4 11
    #     5 12
    #     6 13
    # Transposed with zip later
    matrix = []
    while y < total_y:
        row = []
        blank_pos = []
        while x < total_x:
            # TODO: Blank all points before starting coords
            if bool(i % 2) != odd:  # xor operator for two boolean variables
                blank_pos.append(x)
            elif filtered(filter, (x, y)):
                blank_pos.append(x)
                x += 1
                continue
            else:
                row.append(i)
            x += 1
            i += 1
        if y % 2:  # if odd
            row.reverse()
        for pos in blank_pos:
            row.insert(pos, blank)
        matrix.append(row)
        y += 1
        x = 0
    final = list(map(list, zip_longest(*matrix)))
    print('final matrix')
    for f in final:
        print(f)
    return final
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def order_filter(self,elements):", "def sorted_data():\n stock_data = scrape_data()\n filtered_data = list(filter(sort_func, stock_data))\n return filtered_data", "def sort_by_default(self):\n self.data.sort()", "def sorting_by_criteria(self, result):\r\n\t\tresult = sorted(result, key=lambda r: r[0])\r\n\t\tflag = False\r\n\t\tm = result[0][0]\r\n\t\tfor i in range(len(result)):\r\n\t\t\tif (result[i][0] == m): continue\r\n\t\t\tflag = True\r\n\t\t\tbreak\r\n\t\tif not flag: i += 1\r\n\t\tresult = result[:i]\r\n\r\n\t\t\"\"\" in prewin status, compare useful_amount only \"\"\"\r\n\t\tif (result[0][0] == 0):\r\n\t\t\tresult = sorted(result, key=lambda r: r[1], reverse=True)\r\n\t\t\ttest = \"\"\r\n\t\t\tfor r in result:\r\n\t\t\t\ttest += \"[{0}, {1}, {2}, {3}], \".format(r[0], r[1], r[2], r[3])\r\n#\t\t\tprint \"prewin status: {0}\".format(test)\r\n\t\t\tself.current_best_state = [result[0][0], result[0][1], result[0][2]]\r\n\t\t\treturn result[0][3]\r\n\r\n\t\t\"\"\" sort by score (big -> small) \"\"\"\r\n\t\tresult = sorted(result, key=lambda r: r[2], reverse=True)\r\n\t\tflag = False\r\n\t\tm = result[0][2]\r\n\t\tfor i in range(len(result)):\r\n\t\t\tif (result[i][2] == m): continue\r\n\t\t\tflag = True\r\n\t\t\tbreak\r\n\t\tif not flag: i += 1\r\n\t\tresult = result[:i]\r\n\r\n\t\t\"\"\" sort by useful card amount (big -> small) \"\"\"\r\n\t\tresult = sorted(result, key=lambda r: r[1], reverse=True)\r\n\r\n\t\t\"\"\" choose one to discard \"\"\"\r\n\t\tdcard = result[0][3]\r\n\t\tm = result[0][1]\r\n\t\tbest = result[0]\r\n\t\tfor r in result:\r\n\t\t\tif (r[1] != m): break\r\n\t\t\tctype = GameBoard.CardType(r[3])\r\n\t\t\tif (ctype == 4) and (self.word_list.count(r[3]) == 1):\r\n\t\t\t\tdcard = r[3]\r\n\t\t\t\tbest = r\r\n\t\t\tif (ctype == 5) and (self.wind_list.count(r[3]) == 1):\r\n\t\t\t\tdcard = r[3]\r\n\t\t\t\tbest = r\r\n\t\tself.current_best_state = [r[0], r[1], r[2]]\r\n\t\treturn dcard", "def _sort_phot(self, verbose=False):\n if hasattr(self, \"data\") and hasattr(self, \"data_filters\"):\n ## This looks fugly.\n newkeys = np.array([i for i in self.data_filters.keys()])[np.argsort([self.data_filters[i].lambda_effective.value for i in self.data_filters])]\n\n sorted_data = OrderedDict()\n sorted_data_filters = OrderedDict()\n\n for newkey in newkeys:\n\n if verbose: print(newkey)\n\n sorted_data[newkey] = self.data[newkey]\n sorted_data_filters[newkey] = self.data_filters[newkey]\n\n self.data = sorted_data\n self.data_filters = sorted_data_filters\n\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass", "def filterRansac():\n pass", "def order_ideal(self, gens):", "def processing_function(raw):\r\n\r\n # Sort stewarded & unstewarded depts\r\n STEWARDED_DEPTS = set(stewards.keys()) & kt.ALL_DEPTS\r\n UNSTEWARDED_DEPTS = kt.ALL_DEPTS - STEWARDED_DEPTS\r\n\r\n ##############################\r\n # Filter data in all the ways\r\n ##############################\r\n actives = kt.filterdata(raw, kt.selectors.allactives)\r\n nc = kt.filterdata(actives, kt.selectors.northcampus)\r\n\r\n # International Students\r\n itnl = kt.filterdata(actives, kt.selectors.itnl)\r\n permres = kt.filterdata(actives, kt.selectors.permres)\r\n\r\n # Stewarded / Unstewarded\r\n stewarded= kt.filterdata(\r\n actives,\r\n lambda person: kt.selectors.bydept(person,STEWARDED_DEPTS)\r\n )\r\n unstewarded= kt.filterdata(\r\n actives,\r\n lambda person: kt.selectors.bydept(person,UNSTEWARDED_DEPTS)\r\n )\r\n\r\n # Hire Date\r\n newhires = kt.filterdata(\r\n actives, \r\n 
lambda person: kt.selectors.hiredafter(person,NEW_HIRE_DATE)\r\n )\r\n\r\n oldhires = kt.filterdata(\r\n actives, \r\n lambda person: kt.selectors.hiredbefore(person,NEW_HIRE_DATE)\r\n )\r\n\r\n nohiredate = kt.filterdata(actives, kt.selectors.nohiredate)\r\n\r\n # Degree Program\r\n phd = kt.filterdata(\r\n actives, \r\n lambda person: kt.selectors.bydegree(person,['PhD'])\r\n )\r\n masters = kt.filterdata(\r\n actives, \r\n lambda person: kt.selectors.bydegree(person, kt.MASTERS)\r\n )\r\n\r\n ###############\r\n # Count things\r\n ###############\r\n\r\n # Unit sizes\r\n bargaining_unit_size = len(actives)\r\n overall_members = kt.count_duespayers(actives)\r\n\r\n # Number of actives currently stewarded\r\n total_stewarded = len(stewarded)\r\n total_unstewarded = len(unstewarded)\r\n stewarded_members = kt.count_duespayers(stewarded)\r\n unstewarded_members = kt.count_duespayers(unstewarded)\r\n\r\n # International students\r\n total_intl = len(itnl)\r\n total_permres = len(permres)\r\n intl_members = kt.count_duespayers(itnl)\r\n permres_members = kt.count_duespayers(permres)\r\n\r\n # New Hires\r\n total_newhires = len(newhires)\r\n total_oldhires = len(oldhires)\r\n total_nohiredate= len(nohiredate)\r\n newhire_members = kt.count_duespayers(newhires)\r\n oldhire_members = kt.count_duespayers(oldhires)\r\n\r\n\r\n # Degree Program\r\n total_phd = len(phd)\r\n total_masters = len(masters)\r\n phd_members = kt.count_duespayers(phd)\r\n masters_members = kt.count_duespayers(masters)\r\n \r\n\r\n ######################################\r\n # Derived Results\r\n ######################################\r\n labels = []\r\n results= []\r\n\r\n labels += ['Current Bargaining Unit Size']\r\n results+= [bargaining_unit_size]\r\n\r\n labels += ['Relative Number of GSIs with >1 Steward (%)']\r\n results+= [(100.0*total_stewarded)/bargaining_unit_size]\r\n\r\n labels += ['Relative Number of International Students (%)']\r\n results+= [(100.0*total_intl)/bargaining_unit_size]\r\n\r\n # labels += ['Relative Number of NC Permanent Resident Students (%)']\r\n # results+= [(100.0*total_permres)/bargaining_unit_size]\r\n\r\n labels += ['']\r\n results+= ['']\r\n\r\n labels += ['Overall GEO Membership (%)']\r\n results+= [(100.0*overall_members)/bargaining_unit_size]\r\n\r\n labels += ['Membership Among Stewarded Depts (%)']\r\n results+= [(100.0*stewarded_members)/total_stewarded]\r\n\r\n labels += ['Membership Among Unstewarded Depts (%)']\r\n results+= [(100.0*unstewarded_members)/total_unstewarded]\r\n\r\n labels += ['']\r\n results+= ['']\r\n\r\n labels += ['Relative # of International Students (%)']\r\n results+= [(100.0*total_intl)/bargaining_unit_size]\r\n\r\n labels += ['Membership Among International Students (%)']\r\n results+= [(100.0*intl_members)/total_intl]\r\n\r\n # labels += ['Membership Among Permanent Residents (%)']\r\n # results+= [(100.0*permres_members)/total_permres]\r\n\r\n labels += ['']\r\n results+= ['']\r\n\r\n labels += ['Relative # of New Hires (%)']\r\n results+= [(100.0*total_newhires)/bargaining_unit_size]\r\n\r\n labels += ['Membership Among New Hires (%)']\r\n results+= [(100.0*newhire_members)/total_newhires]\r\n\r\n labels += ['Membership Among Old Hires (%)']\r\n results+= [(100.0*oldhire_members)/total_oldhires]\r\n\r\n labels += ['Number of People w/o Known Hire Dates']\r\n results+= [total_nohiredate]\r\n\r\n labels += ['']\r\n results+= ['']\r\n\r\n labels += ['Relative # of Masters Students (%)']\r\n results+= [(100.0*total_masters)/bargaining_unit_size]\r\n\r\n 
labels += ['Membership Among Masters Students (%)']\r\n results+= [(100.0*masters_members)/total_masters]\r\n\r\n labels += ['Membership Among PhD Students (%)']\r\n results+= [(100.0*phd_members)/total_phd]\r\n\r\n labels += ['']\r\n results+= ['']\r\n\r\n\r\n\r\n # Display summary results\r\n print('\\n')\r\n display_results(labels,results)\r\n\r\n print('Unstewarded Departments:')\r\n for d in UNSTEWARDED_DEPTS: print(d)\r\n\r\n print('\\n')\r\n\r\n\r\n\r\n # Print summary results to csv\r\n kt.writecsv_summary(zip(labels,results), OUT_FILE)\r\n\r\n\r\n\r\n # dump all local variables to file\r\n # v = locals()\r\n\r\n\r\n\r\n return None", "def sort_results(self):\n pass", "def sort_values(self):\r\n for loopindex in range(0, self.population_size):\r\n index = self.cost_populations.index(min(self.cost_populations))\r\n \r\n if loopindex < int(self.population_size / 2):\r\n self.best_districts.append(self.district_population[index])\r\n self.best_costs.append(self.cost_populations[index])\r\n else:\r\n self.worst_districts.append(self.district_population[index])\r\n \r\n del self.cost_populations[index]\r\n del self.district_population[index]", "def sort_by_place(sorted_lines):\n list1 = []\n list2 = []\n list3 = []\n statusestofilter = [\"SAVED\", \"EATEN\", \"SLAYED THE DRAGON HERSELF\"]\n if len(sorted_lines) == 0:\n return sorted_lines\n for i in range(len(sorted_lines)):\n if \"None\" in sorted_lines[i]:\n raise InvalidPrincessException(\"Invalid princess!\")\n for words in statusestofilter:\n if words in sorted_lines[i]:\n raise InvalidPrincessException(f\"The princess is already {words}!\")\n continue\n order = makeorder(sorted_lines)\n for word in sorted_lines:\n if order[0] in word:\n list1.append(word)\n continue\n if order[1] in word:\n list2.append(word)\n continue\n if order[2] in word:\n list3.append(word)\n continue\n result = list1 + list2 + list3\n return result", "def sort(self):\n def get_fval(res):\n return res.fval if not np.isnan(res.fval) else np.inf\n\n self.list = sorted(self.list, key=get_fval)", "def _preprocess_and_filter_original_dataset(data):\n\n label_order = (\"EMPTY\", \"50_SIGN\", \"70_SIGN\", \"80_SIGN\")\n\n filtered_data = []\n for image, signs in data:\n if not signs:\n filtered_data.append((image, label_order.index(\"EMPTY\")))\n else:\n # take the most visible of the interesting signs\n signs = [s for s in signs\n if s.name in label_order and s.visibility == \"VISIBLE\"]\n if signs:\n filtered_data.append((image, label_order.index(signs[0].name)))\n return filtered_data", "def clean():\n filter_phase_data()\n combine_phase_data()\n remove_duplicates_phase_data()", "def sort_table(table, sats_table):", "def filter_and_sort_number_strings():\n# fill it out\n result = []\n for s in STRING_LIST:\n if (s.isnumeric()):\n result.append(s)\n return sorted(result)", "def sortSample(self, key, ascending):\n try:\n self.sample[self.sample['masked'] == False].sort_values(by=key, ascending=ascending)\n except:\n pass", "def sort_values(self):\n self._elements = list(\n _[-1] for _ in sorted((e.value, e.weight, e) for e in self)\n )", "def _reorder_collected(self, data):\n data_dict = {getattr(res, 'name', ''): res for res in data}\n for name, res in data_dict.items():\n if '}' in name:\n data.remove(res)\n data.append(res)\n return data", "def _sort(self):\n self.population.sort()\n self.population.reverse()", "def test_categories_are_sorted(self):\n self.data_sorted(self.test_data['shirts'], self.test_data['pants'])", "def get_sorted_min_seats(sort_keys, 
party_filter=None):\n if (party_filter==None):\n return list(dfUeberhang.sort_values(by=sort_keys)[\"Mindestsitzzahl\"])\n else:\n return list(dfUeberhang[dfUeberhang[\"party\"]==party_filter].sort_values(by=sort_keys)[\"Mindestsitzzahl\"])", "def test_prefilter_exact_prefixes_filtering(self):\r\n # maps to first when all are same length\r\n app = CdHitOtuPicker(params={})\r\n seqs = [('s1', 'ACGTAA'),\r\n ('s2', 'ACGTAC'),\r\n ('s3', 'ACGTAG'),\r\n ('s4', 'ACGTAT'),\r\n ('s5', 'ACGTCA'),\r\n ('s6', 'ACGTCC')]\r\n\r\n prefix_length = 5\r\n actual = app._prefilter_exact_prefixes(seqs, prefix_length)\r\n actual[0].sort()\r\n expected = [('s1', 'ACGTAA'), ('s5', 'ACGTCA')], \\\r\n {'s1': ['s1', 's2', 's3', 's4'], 's5': ['s5', 's6']}\r\n self.assertEqual(actual, expected)\r\n\r\n # maps to first when all are same length\r\n app = CdHitOtuPicker(params={})\r\n seqs = [('s1', 'ACGTAA'),\r\n ('s2', 'ACGTAC'),\r\n ('s3', 'ACGTAGAAAA'),\r\n ('s4', 'ACGTAT'),\r\n ('s5', 'ACGTCA'),\r\n ('s6', 'ACGTCC')]\r\n\r\n prefix_length = 5\r\n actual = app._prefilter_exact_prefixes(seqs, prefix_length)\r\n actual[0].sort()\r\n expected = [('s3', 'ACGTAGAAAA'), ('s5', 'ACGTCA')], \\\r\n {'s3': ['s1', 's2', 's3', 's4'], 's5': ['s5', 's6']}\r\n self.assertEqual(actual, expected)", "def _normalise(self):\n if len(self.user_skip) == 0:\n return\n m = min(self.user_skip.values())\n for x in [k for k in self.user_skip]:\n self.user_skip[x] -= m\n if self.user_skip[x] == 0 and not self.user_queue[x]:\n self._purge_user(x)\n if not self.user_order:\n self.nonempty.clear()", "def _sort_compounds(self):\n self.sorted_molecules = sorted(self.values(), key=operator.attrgetter('criterion'))", "def pre_sort(self, qs):\n return qs", "def sortby(self):\n ...", "def sort_sources(sources):\n\n result = sorted(\n sources.copy(),\n key=lambda x: x.lower().replace(\"-\", \"\").replace(\"_\", \"\").replace(\" \", \"\"),\n )\n\n # Steven Black's repositories/files/lists should be on top!\n steven_black_positions = [\n x for x, y in enumerate(result) if \"stevenblack\" in y.lower()\n ]\n\n for index in steven_black_positions:\n result.insert(0, result.pop(index))\n\n return result", "def _check_empty_and_sort_cost_pairs(self, pair_description, pairs):\n\n if pairs is None or len(pairs) == 0:\n raise ValueError(f\"Empty {pair_description} are provided.\")\n\n # sort based on power output\n pairs.sort(key=lambda p: p[0])\n\n return", "def _sort_by_value(cards: list, order: str) -> list:\n unsorted_cards = cards.copy()\n sorted_cards = []\n sort_fx = min if order == \"asc\" else max\n for _ in cards:\n smallest_card = sort_fx(unsorted_cards)\n unsorted_cards.remove(smallest_card)\n sorted_cards.append(smallest_card)\n return sorted_cards", "def sort_by_status(filtered_lines) -> list:\n patterns = [\"FIGHTS FOR LIFE\", \"INJURED\", \"IN PANIC\", \"BORED\"]\n fightforlife = []\n injured = []\n inpanic = []\n bored = []\n for word in filtered_lines:\n if patterns[0] in word:\n fightforlife.append(word)\n continue\n if patterns[1] in word:\n injured.append(word)\n continue\n if patterns[2] in word:\n inpanic.append(word)\n continue\n if patterns[3] in word:\n bored.append(word)\n continue\n result = fightforlife + injured + inpanic + bored\n return result", "def _reorder_collected(self, data):\n priority = {\n 'post': 1,\n 'get': 2,\n 'put': 2,\n 'patch': 2,\n 'head': 2,\n 'options': 2,\n 'delete': 3,\n }\n data = sorted(\n data,\n key=lambda x: priority.get(getattr(x, 'name', ''), 4))\n return data", "def sort(self):\r\n\t\treturn 
sorted(self.sample)", "def test_filter_remove(self):\n words = ['cart', 'fate', 'date', 'daft']\n filtered = filter_scores(score_words(words), 'fate', -1)\n self.assertEqual([(8, 'daft'), (7, 'date'), (7, 'cart')], filtered)", "def test_input_order_irrelevant(self):\n sorted_strings = ['1532-44349', '1534-44658', '1536-44582', '1536-44935', '1538-44874', '1538-44920']\n mutable_copy = list(sorted_strings)\n for i in range(10000):\n random.shuffle(mutable_copy)\n assert natsort(mutable_copy) == sorted_strings", "def sort(self):\n self.cards.sort()", "def sort(self):\n self.cards.sort()", "def sort(self):\n \n ct=[]\n rt=[]\n wr=[]\n # search for tags that aren't in the right position\n for i in range(len(self.contigs)):\n c = self.contigs[i]\n if c.wa:\n if not self.wa:\n self.wa=[]\n self.wa.extend(c.wa)\n if c.ct:\n newcts=[ct_tag for ct_tag in c.ct if ct_tag.name!=c.name]\n map(self.contigs[i].ct.remove,newcts)\n ct.extend(newcts)\n for j in range(len(c.reads)):\n r = c.reads[j]\n if r.rt:\n newrts=[rt_tag for rt_tag in r.rt if rt_tag.name!=r.rd.name]\n map(self.contigs[i].reads[j].rt.remove,newrts)\n rt.extend(newrts)\n if r.wr:\n newwrs=[wr_tag for wr_tag in r.wr if wr_tag.name!=r.rd.name]\n map(self.contigs[i].reads[j].wr.remove,newwrs)\n wr.extend(newwrs)\n # now sort them into their proper place\n for i in range(len(self.contigs)):\n c = self.contigs[i]\n for ct_tag in ct:\n if ct_tag.name==c.name:\n if self.contigs[i].ct is None:\n self.contigs[i].ct=[]\n self.contigs[i].ct.append(ct_tag)\n if rt or wr:\n for j in range(len(c.reads)):\n r = c.reads[j]\n for rt_tag in rt:\n if rt_tag.name==r.rd.name:\n if self.contigs[i].reads[j].rt is None:\n self.contigs[i].reads[j].rt=[]\n self.contigs[i].reads[j].rt.append(rt_tag)\n for wr_tag in wr:\n if wr_tag.name==r.rd.name:\n if self.contigs[i].reads[j].wr is None:\n self.contigs[i].reads[j].wr=[]\n self.contigs[i].reads[j].wr.append(wr_tag)", "def test_prefilter_exact_prefixes_all_to_one_filtering(self):\r\n # maps to first when all are same length\r\n app = CdHitOtuPicker(params={})\r\n seqs = [('s1 comment', 'ACGTAA'),\r\n ('s2', 'ACGTAC'),\r\n ('s3', 'ACGTAG'),\r\n ('s4', 'ACGTAT'),\r\n ('s5', 'ACGTCA'),\r\n ('s6', 'ACGTCC')]\r\n\r\n prefix_length = 4\r\n actual = app._prefilter_exact_prefixes(seqs, prefix_length)\r\n actual[0].sort()\r\n expected = [('s1', 'ACGTAA')], {'s1':\r\n ['s1', 's2', 's3', 's4', 's5', 's6']}\r\n self.assertEqual(actual, expected)\r\n\r\n # maps to longest seq\r\n app = CdHitOtuPicker(params={})\r\n seqs = [('s1', 'ACGTAA'),\r\n ('s2', 'ACGTACA'),\r\n ('s3', 'ACGTAG'),\r\n ('s4', 'ACGTAT'),\r\n ('s5', 'ACGTCA'),\r\n ('s6', 'ACGTCC')]\r\n\r\n prefix_length = 4\r\n actual = app._prefilter_exact_prefixes(seqs, prefix_length)\r\n actual[0].sort()\r\n expected = [('s2', 'ACGTACA')], {'s2':\r\n ['s1', 's2', 's3', 's4', 's5', 's6']}\r\n self.assertEqual(actual, expected)\r\n\r\n # maps to longest seq\r\n app = CdHitOtuPicker(params={})\r\n seqs = [('s1', 'ACGTAA'),\r\n ('s2', 'ACGTACA'),\r\n ('s3', 'ACGTAGAA'),\r\n ('s4', 'ACGTATAAA'),\r\n ('s5', 'ACGTCAAAAA'),\r\n ('s6', 'ACGTCCAAAAA')]\r\n\r\n prefix_length = 4\r\n actual = app._prefilter_exact_prefixes(seqs, prefix_length)\r\n actual[0].sort()\r\n expected = [('s6', 'ACGTCCAAAAA')\r\n ], {'s6': ['s1', 's2', 's3', 's4', 's5', 's6']}\r\n self.assertEqual(actual, expected)", "def list_cleanup(self, data):\n for data_value in list(data):\n # TODO: Add DEBUG logging (?)\n for filter_key, filter_value in self.required.items():\n if filter_key in data_value.keys():\n if 
isinstance(filter_value, str) and self.exact_match:\n if data_value[filter_key] != filter_value:\n data.remove(data_value)\n break\n elif isinstance(filter_value, str) and (not self.exact_match):\n if data_value[filter_key] is None:\n data.remove(data_value)\n break\n if filter_value not in data_value[filter_key]:\n data.remove(data_value)\n break\n elif isinstance(filter_value, list) and self.exact_match:\n if data_value[filter_key] not in filter_value:\n data.remove(data_value)\n break\n elif isinstance(filter_value, list) and (not self.exact_match):\n if data_value[filter_key] is None:\n data.remove(data_value)\n break\n found_match = False\n for filter_value_item in filter_value:\n if filter_value_item in data_value[filter_key]:\n found_match = True\n if not found_match:\n data.remove(data_value)\n break\n else:\n self.logger.warning(msg=\"List_Cleanup: None of the cases matched. Data: %s Filter: %s\" % (data_value, self.filter))\n # TODO: Handle other possible cases\n else:\n self.logger.warning(msg=\"List_Cleanup: Filter key: %s not present in Data: %s\" % (filter_key, data_value))\n continue\n\n for data_value in list(data):\n # TODO: Add DEBUG logging (?)\n for filter_key, filter_value in self.excluded.items():\n if filter_key in data_value.keys():\n if isinstance(filter_value, str) and self.exact_match:\n if data_value[filter_key] == filter_value:\n data.remove(data_value)\n break\n elif isinstance(filter_value, str) and (not self.exact_match):\n if data_value[filter_key] is None:\n continue\n if filter_value in data_value[filter_key]:\n data.remove(data_value)\n break\n elif isinstance(filter_value, list) and self.exact_match:\n if data_value[filter_key] in filter_value:\n data.remove(data_value)\n break\n elif isinstance(filter_value, list) and (not self.exact_match):\n if data_value[filter_key] is None:\n continue\n found_match = False\n for filter_value_item in filter_value:\n if filter_value_item in data_value[filter_key]:\n found_match = True\n if found_match:\n data.remove(data_value)\n break\n else:\n self.logger.warning(msg=\"List_Cleanup: None of the cases matched. 
Data: %s Filter: %s\" % (data_value, self.filter))\n # TODO: Handle other possible cases\n else:\n self.logger.warning(msg=\"List_Cleanup: Filter key: %s not present in Data: %s\" % (filter_key, data_value))\n continue\n\n return data", "def get_sorted_direct_seats(sort_keys, party_filter=None):\n if (party_filter==None):\n return list(dfUeberhang.sort_values(by=sort_keys)[\"direct_seats\"])\n else:\n return list(dfUeberhang[dfUeberhang[\"party\"]==party_filter].sort_values(by=sort_keys)[\"direct_seats\"])", "def test_filter(self):\n words = ['card', 'fate', 'date', 'daft']\n filtered = filter_scores(score_words(words), 'card', 1)\n self.assertEqual([(9, 'date'), (8, 'fate'), (7, 'daft')], filtered)", "def data_preparation(self) -> None:\n self.logger.info('data cleaning')\n self.logger.info('num of secs: {}, num of ipo_dates: {}, num of secs with prices: {}'.format(\n len(self.data),\n len(self.ipo_dates),\n len(self.prices)\n ))\n excluded = []\n excluded = [i.lower() for i in excluded]\n self.logger.info(f'number of excluded: {len(excluded)}')\n for i in excluded:\n self.data.pop(i)\n for s in self.data:\n # columns with empty assets sum (empty columns and other situations)\n self.data[s].dropna(axis='columns', how='any', subset=['A_0'], inplace=True)\n # columns with descriptions (polish and english names of values)\n self.data[s].drop(self.data[s].columns[[0, 1]], inplace=True, axis=1)\n\n self.logger.info(f'number of secs after cleaning: {len(self.data)}')\n data_list = [k for k in self.data.values()]\n self.uber_data = pd.concat(data_list, ignore_index=True, axis=1)\n self.uber_data = self.uber_data.transpose()\n self.uber_data = self.uber_data.loc[:, pd.notnull(self.uber_data.columns)]", "def cleaning (data):", "def outlierCleaner(predictions, ages, net_worths):\n \n cleaned_data = []\n temp=[]\n ### your code goes here\n for x in xrange(len(predictions)):\n cleaned_data.append((ages[x],net_worths[x],abs(net_worths[x]-predictions[x])))\n \n cleaned_data.sort(key= lambda tup : tup[2], reverse= False)\n cleaned_data=cleaned_data[:81]\n print(len(cleaned_data))\n return cleaned_data", "def stage_one_preprocessing(data: pd.Series) -> pd.Series:\n data_ = data.dropna()\n print('ascii')\n data_ = remove_non_ascii(data)\n print('lower')\n data_ = to_lowercase(data_)\n print('slash')\n data_ = underscore_and_slash_to_space(data_)\n print('ellipse')\n data_ = remove_ellipses(data_)\n print('white')\n data_ = shrink_whitespace(data_)\n #print('contracts')\n #data_ = remove_contractions(data_)\n return data_", "def sort(self):\r\n self.candidates.sort(key=self.sortFitness)\r\n return", "def test_no_filter(self):\r\n\r\n d1 = {\"% IDENTITY\": \"97.6\"}\r\n d2 = {\"% IDENTITY\": \"0.0\"}\r\n d3 = {\"% IDENTITY\": \"100.0\"}\r\n\r\n self.assertTrue(no_filter(d1))\r\n self.assertTrue(no_filter(d2))\r\n self.assertTrue(no_filter(d3))", "def _sort(self):\n self.rows.sort(key=lambda x: (x['PERC1'], x['EQ'], x['PASS'], x['W2']),\n reverse=True)\n\n rank = 0\n prev_perc = 0\n prev_rank = 0\n for row in self.rows:\n if row[\"NR\"] == 0:\n # Something has already populated NR as 0 - so we set rank as\n # 0 too\n row['_RANK'] = 0\n row['_NR'] = 0\n continue\n\n # Increment our count\n rank += 1\n if row['PERC1'] == prev_perc:\n row['NR'] = \"\"\n row['_NR'] = prev_rank # I.e. joint 6th will be 6 here\n row['_RANK'] = rank # I.e. joint 6th could be 7, or 8 etc. 
here\n else:\n row['NR'] = rank\n row['_NR'] = rank\n row['_RANK'] = rank\n prev_perc = row['PERC1']\n prev_rank = rank", "def outlierCleaner(predictions, ages, net_worths):\n \n cleaned_data = []\n\n ### your code goes here\n temp = abs(predictions-net_worths)\n for k in range(len(ages)):\n cleaned_data.append((ages[k][0],net_worths[k][0],temp[k][0]))\n cleaned_data = sorted(cleaned_data, key=lambda data:data[2])\n print(\"Length: \", len(cleaned_data))\n cleaned_data = cleaned_data[:len(cleaned_data)-9]\n print(\"Length: \", len(cleaned_data))\n return cleaned_data", "def asc(self):\n self.get_output = sorted((value, key) for (key, value) in self.get_output.items())", "def organize(select, strain, equals):\n scores = []\n data = list(strainer(select, strain, equals))\n while len(data) != 0:\n number = lowest_number(data)\n scores.append(number)\n data.remove(number)\n return scores", "def filter(self, filters):", "def calculate_finishing_order(x):\n\t# Creates a list of keys which are sorted by their values\n\n\treturn [sailor_names for sailor_names,sailorValues in sorted(x.items(), key=lambda y: y[1], reverse=True)]", "def sortChoices(self):\n self.formatList.sort()", "def normaliseandsort(slu_hyps):\n result = []\n sorted_hyps = slu_hyps.items()\n sorted_hyps.sort(key=lambda x: -x[1])\n total_score = sum(slu_hyps.values())\n for hyp, score in sorted_hyps:\n if total_score == 0:\n result.append({\"score\": 0, \"slu-hyp\": json.loads(hyp)})\n else:\n result.append({\"score\": min(1.0, score/total_score), \"slu-hyp\": json.loads(hyp)})\n return result", "def sort_and_reduce(self):\n self.data = sorted(self.data, key=lambda item: item.pubDate)\n if len(self.data) > MAX_SIZE:\n self.data = self.data[-MAX_SIZE:]", "def _fix_shortsort(self):\n test_dir = join_path(self.install_test_root, self.test_src_dir)\n filter_file(\"../src/\", \"\", join_path(test_dir, \"testshortsort.sh\"))", "def put_sorted_cards(result, cards, weight):\n result.append((cards2str(sort_cards(cards)), weight))", "def test_sort_otu_table_by_mapping_field_some_values_differ(self):\r\n\r\n actual = sort_otu_table_by_mapping_field(\r\n parse_biom_table_str(self.otu_table1),\r\n parse_mapping_file(\r\n self.mapping_f2),\r\n sort_field=\"Nothing\")\r\n expected = parse_biom_table_str(self.nothing_sorted_otu_table1)\r\n self.assertEqual(actual, expected)", "def detect_and_shuffle(self, *args):\n\n self._update_suspicion()\n self.remove_attackers()\n self.drop_buckets()\n buckets = self.get_buckets_to_sort()\n if len(buckets) > 0:\n self._reorder_buckets(buckets)\n self._sort_buckets(buckets)", "def Filter(self,val):\n \n #set th elength of the lis to 0\n List = [self.InitialList[i] for i in range(0,len(self.InitialList))]\n FilterValues = [None]\n Grab = [None]\n Headers = []\n \n #create the quick index\n for i in range(len(self.Condensensed)):\n \n Headers.append([self.Condensensed[i][l][0] for l in range(len(self.Condensensed[i]))])\n \n #grab the values...\n for j in range(len(self.Variables)):\n \n FilterValues.append(self.Variables[j].get())\n\n if self.Variables[j].get().split(' ')[0] == 'All':\n \n Grab.append(False)\n \n else:\n \n Grab.append(True)\n \n #intermediate list to compare\n ToCompare = []\n \n for i in range(1,len(Grab)):\n \n if Grab[i]:\n \n #find the index\n l = Headers[i].index(FilterValues[i])\n \n #grab it\n ToCompare.append([self.Condensensed[i][l][m] for m in range(len(self.Condensensed[i][l]))])\n\n\n for i in range(0, len(ToCompare)):\n \n List = list(set(List).intersection(ToCompare[i]))\n\n 
#update the interface\n self.Gatherer(List,list(self.Input))\n self.BuildTree()", "def stage_two_preprocessing(data: pd.Series) -> pd.Series:\n # designed to be run after remove_contractions\n data_ = data.dropna()\n data_ = remove_punctuation(data_)\n data_ = numbers_to_words(data_)\n data_ = remove_stopwords(data_)\n return data_", "def prune(self):\n self.sort(key=lambda chunk: chunk.probability)\n del self[:-self.model.num_parses]", "def inside_first_filter():\n print(\"inside_first_filter\")\n if len(gCodeBlocks) == 0:\n print(\"no gcode loaded: cannot apply filter\")\n return\n block_to_filter = gCodeBlocks[-1]\n\n g01blocks = block_to_filter.g01blocks\n ng01 = len(g01blocks)\n\n while True:\n swp = False\n for i in range(ng01-1):\n for j in range(i+1, ng01):\n if g01blocks[i].contains(g01blocks[j]):\n g01blocks[i], g01blocks[j] = g01blocks[j], g01blocks[i]\n swp = True\n\n if not swp:\n break\n\n # rearrange original lines\n block_to_filter.lines = []\n for g01block in block_to_filter.g01blocks:\n for line in g01block.lines:\n block_to_filter.lines.append(line)", "def squeeze_accept(partition):\n Write a function that\n - Sort districts by most Democratic heavy and most Republican heavy\n\n - Assign a base value of competitiveness for each district\n - Run chain, accept only if districts satisfy values under or order\n \"\"\"\n\n#--- CONSTRAINTS\n\n\"\"\"", "def eliminate(values):\n\tsolved = [box for box in boxes if len(values[box]) == 1]\n\tempties = [box for box in boxes if len(values[box]) == 0]\n\n\tfor empty in empties:\n\t\tvalues[empty] = '123456789'\n\n\tfor box in solved:\n\n\t\tfor peer in peers[box]:\n\t\t\tvalues = assign_value(values, peer, values[peer].replace(values[box], ''))\n\n\treturn values", "def __qualitaetsListeProteins(self):\n rv = []\n pam30_sortierbar = {}\n for key in pam30.keys():\n pam30_sortierbar[str(pam30[key]) + \";\" + ''.join(key)] = pam30[key]\n if key[0] != key[1]:\n pam30_sortierbar[\n str(pam30[key]) + \";\" + ''.join((key[1], key[0]))\n ] = pam30[key]\n sorted_keys = list(pam30_sortierbar.keys())\n sorted_keys.sort(key=lambda k: int(k.split(\";\")[0]), reverse=True)\n # debugging kept for historical reasons\n # for key in iter(sorted_keys):\n # print(key.split(\";\")[1] + \" has score \" + str(pam30_sortierbar[key]))\n for key in iter(sorted_keys):\n rv.append(key.split(\";\")[1])\n return(rv)", "def brute_force_cow_transport(cows,limit=10):\n # TODO: Your code here\n #print(list(cows.items()))\n cows_list=list(cows.items())\n curr_list=[[[0]]]\n for i in range(1,len(cows_list)):\n smaller_fun(curr_list,i,limit,cows_list)\n\n ans =sorted(curr_list,key=lambda x:len(x))\n print(ans)\n ansfinal=[]\n for item in ans:\n trip=[]\n for i in range(len(item)):\n trip.append(cows_list[item[i]][0])\n ansfinal.append(trip)\n return ansfinal", "def prep_input(buses):\n return sorted([(bus, offset) \n for offset, bus \n in enumerate(buses) \n if bus], reverse=True)", "def reorder( self ):\n self.sorted.sort(self.compareFunction)", "def sort(self):\n self.deckcards.sort()", "def test_sort_sample_ids_by_mapping_value(self):\r\n actual = sort_sample_ids_by_mapping_value(mapping_file=self.mapping_f1,\r\n field='days_since_epoch',\r\n field_type_f=float)\r\n expected = zip(['NotInOtuTable', '1', 'Z2', 'Z1', 'A'],\r\n [0.0, 5.7, 10, 23, 400000])\r\n self.assertEqual(actual, expected)", "def apply_filters(self):\n hurst_cut = 0\n coint_cut = 0\n half_life_cut = 0\n mean_cross_cut = 0\n\n # Create an empty list for pairs that pass the filter tests\n 
validated_pairs = []\n\n # Create all the pairs combination\n self.create_pair_differences()\n\n # Print the number of potential pairs\n print(f\"Number of potential pairs in before filter: {len(self.__pairs_data)}\")\n\n for pair in self.__pairs_data:\n # Select the stocks from the pair\n stock1 = pair[0]\n stock2 = pair[1]\n\n # Test the hurst filter\n if self.hurst_filter(self, stock1=stock1, stock2=stock2):\n hurst_cut += 1\n if self.engel_filter(self, stock1=stock1, stock2=stock2):\n coint_cut += 1\n if self.half_life_filter(self, stock1=stock1, stock2=stock2):\n half_life_cut += 1\n if self.mean_cross_filter(self, stock1=stock1, stock2=stock2):\n mean_cross_cut += 1\n validated_pairs.append([stock1, stock2])\n\n print(f\"Hurst filter pass: {hurst_cut}\")\n print(f\"Co-integration filter pass: {coint_cut}\")\n print(f\"Half-life filter pass: {half_life_cut}\")\n print(f\"Mean-cross filter pass: {mean_cross_cut}\")\n print(f\"Final Number of validated pairs: {len(validated_pairs)}\")\n print(\"The final validated pairs are: \")\n print(validated_pairs)\n\n # Save it to the attribute\n self.__validated_pairs = validated_pairs\n self.__validated_pairs_diff = self.__pair_diff[self.symbolize_pairs(self.__validated_pairs)]", "def pulp_smash():", "def trim_items(self, val):\n self.order_items = self.order_items[:val]", "def test_sort_and_fill_taxa_summaries(self):\r\n exp = [\r\n (['Even1', 'Even2', 'Even3'],\r\n ['Bacteria;Actinobacteria;Actinobacteria(class);Actinobacteridae',\r\n 'Bacteria;Actinobacteria;Actinobacteria(class);NotARealTaxa',\r\n 'Bacteria;AnotherFakeTaxa',\r\n 'Bacteria;Bacteroidetes/Chlorobigroup;Bacteroidetes;Bacteroidia',\r\n 'Bacteria;Firmicutes;Bacilli;Lactobacillales',\r\n 'Bacteria;Firmicutes;Clostridia;Clostridiales',\r\n 'Bacteria;Firmicutes;Erysipelotrichi;Erysipelotrichales',\r\n 'Bacteria;Proteobacteria;Gammaproteobacteria;Enterobacteriales',\r\n 'Eukarya',\r\n 'No blast hit;Other'],\r\n array([[0.0880247251673, 0.0721968465746, 0.081371761759],\r\n [0., 0., 0.],\r\n [0., 0., 0.],\r\n [0.192137761955, 0.191095101593, 0.188504131885],\r\n [0.0264895739603, 0.0259942669171, 0.0318460745596],\r\n [0.491800007824, 0.526186212556, 0.49911159984],\r\n [0.0311411916592, 0.0184083913576, 0.0282325481054],\r\n [0.166137214246, 0.163087129528, 0.168923372865],\r\n [0., 0., 0.],\r\n [0.00426952518811, 0.00303205147361, 0.0020105109874]])),\r\n (['Even4', 'Even5', 'Even6'],\r\n ['Bacteria;Actinobacteria;Actinobacteria(class);Actinobacteridae',\r\n 'Bacteria;Actinobacteria;Actinobacteria(class);NotARealTaxa',\r\n 'Bacteria;AnotherFakeTaxa',\r\n 'Bacteria;Bacteroidetes/Chlorobigroup;Bacteroidetes;Bacteroidia',\r\n 'Bacteria;Firmicutes;Bacilli;Lactobacillales',\r\n 'Bacteria;Firmicutes;Clostridia;Clostridiales',\r\n 'Bacteria;Firmicutes;Erysipelotrichi;Erysipelotrichales',\r\n 'Bacteria;Proteobacteria;Gammaproteobacteria;Enterobacteriales',\r\n 'Eukarya',\r\n 'No blast hit;Other'],\r\n array([[0., 0., 0.],\r\n [0.99, 0.11, 0.075],\r\n [0.1921, 0.19109, 0.18],\r\n [0.192137761955, 0.191095101593, 0.188504131885],\r\n [0.0264895739603, 0.0259942669171, 0.0318460745596],\r\n [0.491800007824, 0.526186212556, 0.49911159984],\r\n [0.0311411916592, 0.0184083913576, 0.0282325481054],\r\n [0.166137214246, 0.163087129528, 0.168923372865],\r\n [0., 0., 0.],\r\n [0.00426952518811, 0.00303205147361, 0.0020105109874]])),\r\n (['Even7', 'Even8'],\r\n ['Bacteria;Actinobacteria;Actinobacteria(class);Actinobacteridae',\r\n 'Bacteria;Actinobacteria;Actinobacteria(class);NotARealTaxa',\r\n 
'Bacteria;AnotherFakeTaxa',\r\n 'Bacteria;Bacteroidetes/Chlorobigroup;Bacteroidetes;Bacteroidia',\r\n 'Bacteria;Firmicutes;Bacilli;Lactobacillales',\r\n 'Bacteria;Firmicutes;Clostridia;Clostridiales',\r\n 'Bacteria;Firmicutes;Erysipelotrichi;Erysipelotrichales',\r\n 'Bacteria;Proteobacteria;Gammaproteobacteria;Enterobacteriales',\r\n 'Eukarya',\r\n 'No blast hit;Other'],\r\n array([[0., 0.],\r\n [0., 0.],\r\n [0., 0.],\r\n [0., 0.],\r\n [0., 0.],\r\n [0., 0.],\r\n [0., 0.],\r\n [0., 0.],\r\n [1., 1.],\r\n [0., 0.]]))\r\n ]\r\n\r\n obs = _sort_and_fill_taxa_summaries([self.taxa_summary1,\r\n self.taxa_summary2,\r\n self.taxa_summary3])\r\n self.compare_multiple_level_array(obs, exp)", "def sort_auto(self):\n key = lambda buz1, buz2: buz1 if buz1.trip_duration <= buz2.trip_duration else buz2\n self.autobuze.sort(key=key)", "def sort_keys_generate(self, mail):\n\t\t# Reset sort keys for every mail:\n\t\tself.sort_keys = []\n\t\tfor key, form in self.selectors:\n\t\t\t# Sort by filter matches only (1:1):\n\t\t\tif key in self.filter_matches.keys():\n\t\t\t\tself.sort_keys_add(key, form, self.filter_matches[key])\n\t\t\t# Sort by all header parts (1:N):\n\t\t\telse:\n\t\t\t\tself.sort_keys_add(key, form, header_values(key, mail))\n\t\treturn len(self.sort_keys)", "def sortColors(self, nums: List[int]) -> None:\n\n#---------------------Solution1----------------------# Dutch Flag Problem\n\n zero, one, two = 0, 0, len(nums)-1\n\n while one <= two:\n if nums[one] == 0:\n nums[one], nums[zero] = nums[zero], nums[one]\n zero += 1\n one += 1\n elif nums[one] == 1:\n one += 1\n else:\n nums[one], nums[two] = nums[two], nums[one]\n two -= 1\n\n\n#---------------------Solution2----------------------# Brute Force, Fast\n\n zeros=nums.count(0)\n for _ in range(zeros):\n nums.remove(0)\n nums.append(0)\n ones=nums.count(1)\n for _ in range(ones):\n nums.remove(1)\n nums.append(1)\n twos=nums.count(2)\n for _ in range(twos):\n nums.remove(2)\n nums.append(2)", "def filter_query(self):\n with open(self.mash_out_path, \"r\") as f:\n for line in f.readlines():\n line = \" \".join(line.split())\n hold = []\n for x in line.split(' '):\n hold.append(x)\n self.mash_all[str(hold[1])] = [\"--\", hold[2], hold[3]] ## Holds entire mash output\n if (float(hold[3]) > 0.001 or float(hold[3]) == 0):\n self.mash_dict[str(hold[1])] = [\"--\", hold[2], hold[3]] ## key = fasta id = [name, distance, p-val], holds filtered mash output\n\n q = SeqIO.parse(open(self.path3), 'fasta')\n self.filtered_out_path = os.getcwd() + \"/filtered_query.fasta\" ## Holds sequences for off-target algorithm\n with open(self.filtered_out_path, 'w') as outfile:\n for fasta in q:\n id, description = fasta.id, fasta.description\n if id in self.mash_dict.keys():\n line = description.replace(\",\",\"\")\n hold = []\n name = \"\"\n for x in line.split(' '):\n hold.append(x)\n for i in range(1,len(hold)-2):\n name += hold[i] + \" \"\n self.mash_dict[id][0] = name\n SeqIO.write(fasta, outfile, 'fasta')\n\n self.progress_val += 100/6", "def sort_reads(self): \n if not self.sampling:\n self.convert_to_array()\n self.reads = self.reads[self.reads[:,0].argsort()]", "def outlierCleaner(predictions, ages, net_worths):\n \n cleaned_data = [90]\n errors=[90]\n single_set=[]\n test_tuple=[]\n\n for (networth,age,pred) in zip(net_worths,ages,predictions):\n error=abs(networth-pred)\n errors.append(error)\n single_set=(age,networth,error)\n test_tuple.append(single_set)\n #print predictions\n\n print (\"****test tuple***\")\n #for x in test_tuple:\n # print x\n 
#errors.sort()\n #print errors\n #print len(errors)\n print(\"########\")\n\n #cleaned_data=list(zip(ages,net_worths,errors))\n #cleaned_data.sort(errors)\n #cleaned_data.sort(key=lambda tup: tup[2])\n from operator import itemgetter\n sorted_data=sorted(test_tuple,key = itemgetter(2))\n\n print \"Printing sorted set\"\n #print sorted_data\n sorted_data.__delitem__(81)\n sorted_data.__delitem__(81)\n sorted_data.__delitem__(81)\n sorted_data.__delitem__(81)\n sorted_data.__delitem__(81)\n sorted_data.__delitem__(81)\n sorted_data.__delitem__(81)\n sorted_data.__delitem__(81)\n sorted_data.__delitem__(81)\n sorted_data.__delitem__(80)\n #for x in sorted_data:\n # print x\n #print sorted_data\n\n #print len(sorted_data)\n\n ### your code goes here\n\n \n return sorted_data", "def sort(self):\n # Base Case\n # If the robot has reached the end of the list and his light is off (no swaps have occurred),\n if self.can_move_right() == False and self.light_is_on() == False:\n return\n\n # Grab the first card\n self.swap_item()\n\n # While the robot is still able to move right,\n while self.can_move_right():\n\n # Move right\n self.move_right()\n\n # Compare the item in his hand to that in front of him\n # If the item in front of him is greater than what he is holding (-1), swap items\n if self.compare_item() == -1:\n # Swap the item\n self.swap_item()\n # Turn his light on to indicate that a swap has occured\n self.set_light_on()\n \n # Once the robot can no longer move right, he is at the end of the list and holding the largest value\n # Swap items\n self.swap_item()\n\n # Now the robot needs to traverse back to index 0, grabbing the smallest value as he goes\n # Follow the same logic as when he moved right with the largest value\n\n # If he hits a empty slot in the list, everything in front of it has been sorted\n # He doesn't need to sort anymore, he is holding the smallest value left to be sorted. \n # Put it in the blank spot and turn to move back in the other direction\n\n while self.compare_item() is not None:\n\n # Move left\n self.move_left()\n\n # Compare the item in his hand to that in front of him\n # If the item in front of him is less than what he is holding (1), swap items\n if self.compare_item() == 1:\n # Swap the item\n self.swap_item()\n # Turn his light on to indicate that a swap has occured\n self.set_light_on()\n \n # Once self.compare_item() is None, that means he is in front of a blank space\n # - everything to the left of the blank space has already been sorted\n # Deposit what he is holding\n self.swap_item()\n\n # Reset the light to the off position\n self.set_light_off()\n\n # Move one spot over to the right\n self.move_right()\n\n # Re-run the process all over again\n self.sort()", "def shifter(list):\n #sc1 = \"objects \" #Scaffolding message variables. Temporary\n #sc2 = \" and \"\n #sc3 = \" switched\"\n #sc4 = \" in order\"\n n = len(list) #Assign length of list to variable n\n x = 0 #Start at first position in list\n while listscan(list):\n if list[x] > list[x + 1]:\n t1= list[x] #Assign both items to a variable, then reinsert in opposite positions\n t2 = list[x + 1]\n list[x + 1] = t1\n list[x] = t2\n #print(sc1 + str(x) + sc2 + str(x + 1) + sc3)\n if x + 1 < n - 1: #Only when not at end\n x = x + 1 #Move position one more right\n else: #Base case when unsorted\n x = 0 #Restart Cycle\n else: #If sorted, and more room to right, move over one, leave items in position.\n if x + 1 < n - 1:\n #print(sc1 + str(x) + sc2 + str(x + 1) + sc4)\n x = x + 1\n else: #Base case. 
If at end of list, and items in order, leave.\n print(sc1 + str(x) + sc2 + str(x + 1) + sc4)\n x = 0 #Restart cycle", "def test_dotted_sorting(self):\n assert natsort(['1.5', '1.0']) == ['1.0', '1.5']", "def FilterScafDict(ScafDict):\n\n def CheckScafOrder(NestedListBoi, StrandInfo):\n \"\"\"The purpose of this nested function is to check if the size of the\n previous scaffold is less than the current. Returns True if this is the\n case, and false if this fails\n\n :arg1: [[0, 82558], [82568, 14200], [96783, 4436], [101349, 11648],\n [113468, 12600], [126901, 6375], [136697, 30162]]\n :returns: Boolean value TRUE of FALSE\n \"\"\"\n NoOverlap = True\n \n \n \n CurrentLen = 0\n if StrandInfo == '+':\n for item in NestedListBoi:\n AddItems = item[0] + item[1] \n if AddItems > CurrentLen:\n CurrentLen = AddItems\n else:\n print(\"WE ARE FUCKEDDDDDD\")\n NoOverlap = False\n\n elif StrandInfo == '-':\n #Flip list for negative\n NestedListBoi = NestedListBoi[::-1]\n for item in NestedListBoi:\n AddItems = item[0] + item[1] \n if AddItems > CurrentLen:\n CurrentLen = AddItems\n else:\n print(\"WE ARE FUCKEDDDDDD\")\n break\n sys.exit(2)\n NoOverlap = False\n return NoOverlap\n\n\n for key, value in ScafDict.items():\n StartPGASeq = int(value[0][0][2])\n EndPGaSeq = int(value[-1][0][2])\n \n TotalScaflen = int(value[0][1][5])\n LastLastScafLentoadd = int(value[-1][1][3])\n NegLastScafToAdd = int(value[0][1][3])\n\n\n TakeAllScafStartsAndLens = []\n\n for thing in value:\n StartAndLen = [int(thing[1][2]), int(thing[1][3])]\n TakeAllScafStartsAndLens.append(StartAndLen)\n \n #Check if there is any overlap with scaf hitting different PGA scaf\n TakeStrand = value[0][1][4]\n Overlap = CheckScafOrder(TakeAllScafStartsAndLens, TakeStrand)\n \n\n #Print List out with correct orientation\n if TakeStrand == '-':\n FinalPGSLoc = (EndPGaSeq)\n NegScafEnd = StartPGASeq + NegLastScafToAdd\n FinalListToPrint = [key,str(EndPGaSeq), str(NegScafEnd), str(TakeStrand)]\n print('\\t'.join(FinalListToPrint))\n\n elif TakeStrand == '+':\n FinalPGSLoc = (EndPGaSeq + LastLastScafLentoadd)\n FinalListToPrint = [key,str(StartPGASeq), str(FinalPGSLoc), str(TakeStrand)]\n print('\\t'.join(FinalListToPrint))\n\n #print(\"FINAL\")\n #print(key)\n #print(CurrentVal)\n #print(FinalItem[2][0:5])\n #input()", "def spilt_columns(self):\n special_attentation_columns = [\n 'trim',\n 'displacement',\n 'transmission_description',\n ]\n\n # Parse and clean the content of trim\n self.data[special_attentation_columns[0]] = self.data[special_attentation_columns[0]].str.replace(\n '(?:\\s|^|\\d)dr(?:\\s)', '')\n self.data[special_attentation_columns[0]] = self.data[special_attentation_columns[0]].str.replace(\n 'Sport Utility', 'Sport Utility Vehicle')\n self.data[special_attentation_columns[0]\n ] = self.data[special_attentation_columns[0]].str.strip()\n # print(self.data[special_attentation_columns[0]])\n\n # Extract engine displacement\n self.data[special_attentation_columns[1]] = self.data[special_attentation_columns[1]].str.extract(\n '(\\d\\.\\d)', expand=True)\n # print(self.data[special_attentation_columns[1]])\n\n # Extract transimission speed\n self.data['transmission_speeds'] = self.data[special_attentation_columns[2]].str.extract(\n '(\\d+)', expand=True)\n # print(self.data['transmission_speeds'])\n\n # Extract transimission type\n trans_type_temp = self.data[special_attentation_columns[2]].str.extract(\n '((?:\\s|^)Automatic(?:\\s|$))|((?:\\s|^)Manual(?:\\s|$))', expand=True)\n self.data['transmission_type'] = 
trans_type_temp[0].fillna(\n trans_type_temp[1])\n self.data['transmission_type'] = self.data['transmission_type'].str.strip()\n # print(self.data['transmission_type'])\n\n # Parse and clean the content of transmission description\n self.data[special_attentation_columns[2]\n ] = self.data[special_attentation_columns[2]].str.replace('Spd', 'Speed')\n self.data[special_attentation_columns[2]\n ] = self.data[special_attentation_columns[2]].str.replace('w/Manual', 'Manual')\n self.data[special_attentation_columns[2]] = self.data[special_attentation_columns[2]].str.replace(\n 'w/Automatic', 'Automatic')\n self.data[special_attentation_columns[2]\n ] = self.data[special_attentation_columns[2]].str.replace('w/OD', '')\n self.data[special_attentation_columns[2]\n ] = self.data[special_attentation_columns[2]].str.strip()\n # print(self.data[special_attentation_columns[2]])\n\n return None", "def outlierCleaner(predictions, ages, net_worths):\n import operator\n cleaned_data = []\n temp = {}\n ### your code goes here\n \n for i in range(len(ages)):\n error = predictions[i] - net_worths[i]\n temp[i] = error\n #print temp\n sorted_x = sorted(temp.items(), key=operator.itemgetter(1))\n sorted_x.reverse() \n #print sorted_x\n ten_p = (int)(0.1*len(ages))\n poop = sorted_x[(ten_p):]\n \n for item in poop:\n idx = item[0]\n cleaned_data.append((ages[idx],net_worths[idx],item[1][0]))\n \n return cleaned_data", "def _sort_by_price(self, data):\n # Separate the data by currency\n alch = []\n fusing = []\n chaos = []\n exalted = []\n \n for item in data:\n price = item['price']\n if \"alchemy\" in price:\n alch.append(item)\n elif \"fusing\" in price:\n fusing.append(item)\n elif \"chaos\" in price:\n chaos.append(item)\n elif \"exalted\" in price:\n exalted.append(item)\n \n alch = natsorted(alch, key=lambda item: item['price'])\n fusing = natsorted(fusing, key=lambda item: item['price'])\n chaos = natsorted(chaos, key=lambda item: item['price'])\n exalted = natsorted(exalted, key=lambda item: item['price'])\n \n result = []\n result.extend(alch)\n result.extend(fusing)\n result.extend(chaos)\n result.extend(exalted)\n return result", "def sort(self): # sort all entries to make room for new ones, determine best and worst\n ns = self.num_stored.value\n ys = np.asarray(self.ys[:ns])\n yi = ys.argsort()\n sortRuns = []\n for i in range(len(yi)):\n y = ys[yi[i]]\n xs = self.get_x(yi[i])\n sortRuns.append((y, xs))\n numStored = min(len(sortRuns),int(0.9*self.capacity)) # keep 90% best \n for i in range(numStored):\n self.replace(i, sortRuns[i][0], sortRuns[i][1])\n self.num_sorted.value = numStored \n self.num_stored.value = numStored \n return numStored", "def sorter(row):\n criteria = []\n for value in row[1]: # Ignore enumeration\n criteria.append(\n (\n value is not None,\n \"\" if isinstance(value, Number) else type(value).__name__,\n value,\n )\n )\n return criteria", "def cleanse_priest_list(priests_list):", "def sort_drops(x, alphabetical=True):\n if alphabetical != True:\n drop_sort = {\"W1\": 1, \"W2\": 2, \"W3\": 3, \"W4\": 4, \"W5\": 5, \"W6\": 6, \"S1\": 7, \"S2\": 8, \"S3\": 9, \"S4\": 10}\n return drop_sort[x]\n else:\n pass", "def clean_data(df_turnstiles):\n\n # sort values in a such a way that the duplicate values sit directly below the originals, so they will be removed.\n df_turnstiles.sort_values(\n [\"C/A\", \"UNIT\", \"SCP\", \"STATION\", \"DATE_TIME\"],\n inplace=True,\n ascending=False,\n )\n # keeps top row, deletes others\n df_turnstiles.drop_duplicates(\n subset=[\"C/A\", \"UNIT\", 
\"SCP\", \"STATION\", \"DATE_TIME\"], inplace=True\n )\n\n # remove DESC column\n df_turnstiles = df_turnstiles.drop([\"DESC\"], axis=1, errors=\"ignore\")\n\n # remove the many spaces in the EXITS column name\n df_turnstiles.rename(\n columns={\n \"EXITS \": \"EXITS\"\n },\n inplace=True,\n )\n\n return df_turnstiles", "def sort(self):\n for section, section_items in self.items():\n if sorted(section_items) == list(section_items):\n continue\n\n section_dict = {k: v for k, v in section_items.items()}\n\n for k in list(section_items):\n self.remove_option(section, k)\n\n for k, v in sorted(section_dict.items()):\n self.set(section, k, v)", "def naked_twins(values):\n \n #display(values)\n # Find all instances of naked twins\n naked_twins = []\n for unit in unitlist:\n for box in unit:\n if len(values[box]) == 2:\n for other_box in peers[box]:\n if values[box] == values[other_box]:\n \n naked_twins.append((box,other_box))\n # Eliminate the naked twins as possibilities for their peers\n #print(set(naked_twins))\n for unit in unitlist:\n for naked_twin in set(naked_twins):\n if naked_twin[0] in unit and naked_twin[1] in unit:\n for box in unit:\n if box not in naked_twin:\n values = assign_value(values, box, values[box].replace(values[naked_twin[0]][0],\"\"))\n values = assign_value(values, box, values[box].replace(values[naked_twin[0]][1],\"\"))\n #print()\n #display(values)\n #print()\n #from solution_test import after_naked_twins\n #display(after_naked_twins)\n return values", "def induced_sorting(\n lms, tails, heads, SA, type_suffix, text, n, m, alpha, bucket_sizes, sigma\n):\n for i in range(m - 1, -1, -1): # place LMS suffixes at the end of their buckets\n nfs = tails[text[lms[i]]]\n SA[nfs] = lms[i]\n tails[text[lms[i]]] -= 1\n\n for i in range(n): # place the L-type suffixes at the fronts of their buckets\n if SA[i] > 0 and type_suffix[SA[i] - 1] == L_TYPE:\n nfs = heads[text[SA[i] - 1]]\n SA[nfs] = SA[i] - 1\n heads[text[SA[i] - 1]] += 1\n\n # reset bucket counters\n heads, tails = bucket_intervals(alpha, bucket_sizes, sigma)\n\n for i in range(\n n - 1, -1, -1\n ): # place the S-type suffixes at the ends of their buckets\n if SA[i] > 0 and type_suffix[SA[i] - 1] == S_TYPE:\n nfs = tails[text[SA[i] - 1]]\n SA[nfs] = SA[i] - 1\n tails[text[SA[i] - 1]] -= 1", "def reversesort(self):\n ...", "def data_for_sorting() -> NoReturn:\n raise NotImplementedError", "def data_for_sorting() -> NoReturn:\n raise NotImplementedError" ]
[ "0.58309174", "0.5732334", "0.55798095", "0.55124855", "0.55037796", "0.548661", "0.5477536", "0.5469715", "0.5449085", "0.5353315", "0.5303698", "0.5301049", "0.5292167", "0.5289656", "0.52799255", "0.5173217", "0.51695305", "0.5167786", "0.5151389", "0.51467687", "0.51287025", "0.51224047", "0.51199245", "0.50952893", "0.50905967", "0.5090106", "0.50845313", "0.5064726", "0.5046509", "0.50448465", "0.50346756", "0.5032411", "0.501563", "0.50075245", "0.500233", "0.4975042", "0.4975042", "0.4952938", "0.49444398", "0.49393064", "0.4923555", "0.49086183", "0.4905516", "0.49036488", "0.4898754", "0.48867464", "0.4885383", "0.4884024", "0.48837885", "0.4871859", "0.486786", "0.48616886", "0.4857236", "0.48568898", "0.4855108", "0.48514035", "0.4848111", "0.4846964", "0.48413607", "0.4841097", "0.4838235", "0.483552", "0.4831241", "0.4826565", "0.4822227", "0.48197627", "0.4819165", "0.4813942", "0.48081332", "0.47989216", "0.4798386", "0.479527", "0.47889483", "0.47883224", "0.47846782", "0.47808442", "0.47804412", "0.47797632", "0.47750774", "0.47722027", "0.47670224", "0.4763477", "0.4762638", "0.4757471", "0.4750618", "0.47476763", "0.47474548", "0.4740562", "0.47402197", "0.47393832", "0.47334167", "0.4730609", "0.4726881", "0.47195113", "0.47121984", "0.4711911", "0.47107765", "0.4707686", "0.47012344", "0.46988443", "0.46988443" ]
0.0
-1
Write the matrix to a csv table
def write_out(matrix, filename):
    with open(filename, 'w') as csvfile:
        writer = csv.writer(csvfile)
        for r in matrix:
            writer.writerow(r)
    print(filename + ' writen!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_csv(table, header):\n with open(\"%s.csv\" % header, \"w\") as csvfile:\n for i in range(len(table)):\n for j in range(len(table[i])):\n if j != len(table[i])-1:\n tmp = table[i][j] + \",\"\n else:\n tmp = table[i][j] + \"\\n\"\n csvfile.write(tmp)", "def WriteToCsv(matrix, csvFileName, csvDelimiter=','):\r\n \r\n if os.path.isfile(csvFileName) == True:\r\n os.remove(csvFileName) # Deletes the CSV file\r\n\r\n filePermission = \"w\" # Platform-specific file reading privileges\r\n #if platform.system() == \"Windows\":\r\n # filePermission = \"wb\"\r\n \r\n with open(csvFileName, filePermission) as csvfile:\r\n writer = csv.writer(csvfile, delimiter=csvDelimiter, quotechar='|', quoting=csv.QUOTE_MINIMAL)\r\n for row in matrix:\r\n if row != []:\r\n writer.writerow(row)\r\n csvfile.close()", "def createFileCSV(table, path=\"./prediction\"):\t\n\tif len(table) < 1:\n\t\traise NameError('Empty Table!')\n\telse:\n\t\tfile = open(path + '.csv', 'w+')\n\n\t\tfile.write(table[0].toStringHeaders() + \"\\n\")\n\n\t\tfor row in table:\n\t\t\tfile.write(row.toStringCSV() + '\\n')\n\t\tfile.close()", "def csv_output(self):\r\n fh = open(\"output.csv\",'w')\r\n for i in range(len(self.population.columns)):\r\n if i != len(self.population.columns)-1:\r\n fh.write(str(self.population.columns[i]))\r\n fh.write(\",\")\r\n else:\r\n fh.write(str(self.population.columns[i]))\r\n fh.write(\"\\n\")\r\n\r\n for i in range(len(self.population.data)):\r\n for j in range(len(self.population.data[i])):\r\n if j != len(self.population.data[i])-1:\r\n fh.write(str(self.population.data[i][j]))\r\n fh.write(\",\")\r\n else:\r\n fh.write(str(self.population.data[i][j]))\r\n fh.write(\"\\n\")\r\n fh.close()", "def write_csv(self):\n self.tableView.df.to_csv('Data export.csv', index=False)\n print('CSV file exported')", "def export_csv(self, path):\r\n\r\n with open(path, 'w') as f:\r\n f.write('# h,hr,m')\r\n\r\n if self.rho is not None:\r\n f.write(',rho')\r\n if self.temperature is not None:\r\n f.write(',temperature')\r\n\r\n f.write('\\n')\r\n for i in range(self.shape[0]):\r\n for j in range(self.shape[1]):\r\n f.write(f'{self.h[i, j]},{self.hr[i, j]},{self.m[i, j]}')\r\n if self.rho is not None:\r\n f.write(f',{self.rho[i, j]}')\r\n if self.temperature is not None:\r\n f.write(f',{self.temperature[i, j]}')\r\n f.write('\\n')\r\n return", "def write_table_to_csv(table: List[List], filename: str):\n with open(filename, 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter='\\t')\n for row in table:\n writer.writerow(row)", "def to_csv(header, rows):\r\n with open('result.csv', 'w') as result:\r\n result_writer = csv.writer(result, delimiter=';')\r\n result_writer.writerow(header)\r\n result_writer.writerows(rows)", "def write(self): \n # Open csv file\n with open(self.file_name, 'w', newline='') as file:\n self._writer = csv.writer(file)\n \n # Write header rows\n# self.write_sim_header_data(self.trace.sim.get_data())\n \n # Write trace table\n self._writer.writerow(['Record #', 'Rep', 'Time',\n 'Priority', 'Record Type', 'Name'])\n for trace_record in self.trace._record_list:\n self._writer.writerow(trace_record.get_row())\n file.close()", "def mat_to_csv(\n self,\n input_matrix,\n output_csv,\n fields=None,\n n_tab=1,\n debug=False,\n i='origin',\n j='destination'\n ):\n script_text = r\"\"\"\n RUN PGM=MATRIX PRNFILE=\"format_env\\mat_to_csv.prn\" MSG='mat_to_csv'\n\n FILEI MATI[1] = filei_mati\n FILEO PRINTO[1] = fileo_printo\n\n print_headers\n JLOOP\n print_in_jloop\n ENDJLOOP\n\n ENDRUN\n 
\"\"\"\n if fields is None:\n tabs = ['tab_%i' % (i + 1) for i in range(n_tab)]\n fields = tabs\n else:\n n_tab = len(fields)\n field_names = ', '.join(fields)\n\n filei_mati = '\"%s\"' % input_matrix\n fileo_printo = '\"%s\"' % output_csv\n\n print_headers = 'IF (I = 1) \\n PRINT LIST =\"' + '\" ,\";\" ,\"'.join([i, j] + fields) + '\" PRINTO = 1 \\n ENDIF'\n print_assignation = ' '.join(['%s = MI.1.%s \\n' % (fields[i].replace(' ', '_'), i + 1) for i in range(n_tab)])\n print_statement = 'PRINT LIST = I, \";\", J, \";\", ' + ',\";\",'.join([f.replace(' ', '_') for f in fields]) + ' PRINTO = 1'\n print_in_jloop = print_assignation + ' \\n' + print_statement\n\n # creating a cube script\n script = open(self.environment + r'\\mat_to_csv.s', 'w', encoding='latin')\n script.write(script_text.replace(\n 'format_env', self.environment).replace(\n 'filei_mati', filei_mati).replace(\n 'fileo_printo', fileo_printo).replace(\n 'field_names', field_names).replace(\n 'print_in_jloop', print_in_jloop).replace('print_headers', print_headers))\n script.close()\n\n # runs the script with voyager.exe\n options = \"\"\"/Start /CloseWhenDone /Minimize /NoSplash\"\"\" if not debug else \"\"\n os.system('voyager.exe \"' + self.environment + r'\\mat_to_csv.s\" ' + options)", "def generate_csv_table(table_values):\n\n with open('ayasdi_assignment.csv', 'wb') as csvfile:\n filewriter = csv.writer(csvfile, delimiter=',')\n filewriter.writerows(table_values)", "def writeMatrix(self):\n\t\tpass", "def _csvWriter(self):\r\n # Initialize Header\r\n table = []\r\n voltageRow = []\r\n for i in range(len(self._voltages)):\r\n voltageRow.append(self._voltages[i][0])\r\n voltageRow.append(\" \")\r\n if self._vna.isTwoComponents():\r\n voltageRow.append(\" \")\r\n table.append(voltageRow)\r\n \r\n # Fill table with data\r\n # if self._vna.isTwoComponents():\r\n # for i in range(len(self._frequency[0])):\r\n # row = []\r\n # for j in range(len(self._frequency)):\r\n # row.append(self._frequency[j][i])\r\n # row.append(self._intensity[j][2*i])\r\n # row.append(self._intensity[j][2*i + 1])\r\n # table.append(row)\r\n # else: \r\n for i in range(len(self._frequency[0])):\r\n row = []\r\n for j in range(len(self._frequency)):\r\n row.append(self._frequency[j][i])\r\n row.append(self._intensity[j][i])\r\n table.append(row)\r\n\r\n # Write to CSV\r\n filename = 'CSVs/' + self._vna.getDateFormatted() + '.csv'\r\n with open(filename, 'w', newline='') as csvfile:\r\n dataWriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)\r\n for i in range(len(table)):\r\n dataWriter.writerow(table[i])", "def write_csv(self, filelike):\r\n items = self.rows()\r\n writer = unicodecsv.writer(filelike, encoding=\"utf-8\")\r\n writer.writerow(self.header())\r\n for item in items:\r\n writer.writerow(item)", "def saveCSV(self):\n filename=tkFileDialog.asksaveasfilename(defaultextension='.csv',\n initialdir=os.getcwd(),\n filetypes=[(\"csv\",\"*.csv\"),(\"All files\",\"*.*\")])\n if not filename:\n return\n for m in self.matrices:\n matrix = self.matrices[m] \n if matrix != None: \n c=matrix.csvRepresentation()\n f=open(filename,'w')\n f.write(c)\n f.close()\n return", "def write_table_to_file(table):\n with open(\"story.csv\", \"w\") as file:\n for record in table:\n row = ';'.join(record)\n file.write(row + \"\\n\")", "def write_to_csv(self, data):\n with open(\"out.csv\", \"w\", newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerow(self.column_names)\n writer.writerows(data)\n print(\" Updated succesfully \")", "def 
WriteMatrix(matrix, outfile=sys.stdout, separator=\"\\t\", format=\"%f\",\n row_headers=None, col_headers=None):\n if col_headers:\n outfile.write(separator + separator.join(col_headers) + \"\\n\")\n\n for x in range(0, matrix.shape[0]):\n if row_headers:\n outfile.write(row_headers[x] + separator)\n outfile.write(\n string.join(map(lambda x: format % x, matrix[x, ]), separator) + \"\\n\")", "def writeTable(table, filename):\n with open(filename, \"w\") as output:\n writer = csv.writer(output, lineterminator='\\n')\n writer.writerows(table)", "def export_table(path, path_out):\n table = rb.get_table(path)\n table.to_csv(path_out, index=False)\n return", "def save_csv(self, filename): # DONE\n self.data.to_csv(filename)", "def to_csv(self, csvwriter):\n csvwriter.writerow(self.to_csv_row())", "def exportToCsv(self, filepath):\n table = list()\n table.append(list(self.__header))\n for a in self.__assays:\n table.append(\n [unicode(a.timestamp.isoformat()),\n unicode(a.dab_cell_count),\n unicode(a.hem_cell_count),\n unicode(a.dab_dabhemfraction),\n unicode(a.img_path)])\n # File encoding will be same as it expected by Excel on machine where\n # this file was created.\n encoding = locale.getpreferredencoding()\n with open(filepath, mode='wb') as f:\n writer = UnicodeWriter(f, encoding=encoding, delimiter=';')\n writer.writerows(table)", "def write_torque_table(A, filename):\n f = open(filename, 'w')\n for row in range(np.size(A, axis=0)):\n A[row,:].tofile(f, sep=',')\n f.write('\\n')\n f.close()", "def write_csv(self, outfile, collapse_orders=False, show_age=False):\r\n # Write header row\r\n outfile.write(self.get_csv_header(collapse_orders, show_age).encode())\r\n\r\n # Write content\r\n for x in self.records:\r\n x.write_csv(outfile, collapse_orders, show_age)", "def export_feature_matrix_csv(feature_matrix,path, delimiter = ','):\n with open(path, encoding='utf-8', mode='w') as f:\n header = ['symbol'] + feature_matrix.features\n writer = DictWriter(f, header,delimiter=delimiter)\n writer.writerow({h: h for h in header})\n for seg in feature_matrix.segments:\n #If FeatureMatrix uses dictionaries\n #outdict = feature_matrix[seg]\n #outdict['symbol'] = seg\n #writer.writerow(outdict)\n if seg in ['#','']: #wtf\n continue\n featline = feature_matrix.seg_to_feat_line(seg)\n outdict = {header[i]: featline[i] for i in range(len(header))}\n writer.writerow(outdict)", "def write(self):\n \n self.df.to_csv('/home/austin/Desktop/Falcon/realestate/Falcon/Datasets/mls.csv')", "def matrix_export_save(simulation, demandsegment, dir):\n matrix = demandsegment.matrix\n matrix_couples = Matrix.objects.filter(matrices=matrix)\n # To avoid conflict if two users export a file at the same time, we\n # generate a random name for the export file.\n filename = dir + '/matrix(' + demandsegment.usertype.name + ')(' + str(demandsegment.usertype.user_id) + ').tsv'\n\n with codecs.open(filename, 'w', encoding='utf8') as f:\n writer = csv.writer(f, delimiter='\\t')\n # Get a dictionary with all the values to export.\n values = matrix_couples.values_list('p__user_id', 'q__user_id', 'r')\n # Write a custom header.\n writer.writerow(['origin', 'destination', 'population'])\n writer.writerows(values)\n\n return filename", "def at_write_prob_mat_to_csv(na_list, prob_mat, out_path):\n create_folder(os.path.dirname(out_path))\n f = gzip.open(out_path, 'w')\n for n in xrange(len(na_list)):\n na = na_list[n]\n f.write(na)\n for p in prob_mat[n]:\n f.write('\\t' + \"%.3f\" % p)\n f.write('\\r\\n')\n f.close()", "def 
writeCSV():\n final_list = get_final_list()\n path_to_csv_File = 'system_metrics.csv'\n\n csv_file = open(path_to_csv_File, 'w+', newline='', encoding=\"utf8\")\n csv_file_writer = csv.writer(csv_file, delimiter=',')\n\n csv_file_writer.writerow(['Subscription', 'Resource', 'MetricType',\n 'Timestamp', 'Unit', 'Minimum', 'Maximum', 'Average'])\n\n for item in final_list:\n csv_file_writer.writerow([item['subscription'], item['resource'], item['metricType'], item['timestamp'],\n item['unit'], item['minimum'], item['maximum'], item['average']])\n\n print('Output written successfully!!')", "def save_csv(net, wires, net_id, chip_id, chip):\n with open('output/output.csv', 'w') as file:\n # Write first line\n output = csv.writer(file)\n output.writerow([\"net\", \"wires\"])\n\n # Index and fill the body\n for step in range(len(wires)):\n output.writerow([net[step],wires[step]])\n\n # End of file\n output.writerow([f\"chip_{chip_id}_net_{net_id}\", chip.cost])", "def _csv_export(self, exppath):\n with open(exppath, 'w') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',', skipinitialspace=True)\n csvwriter.writerow(['hexstr','dmc','name'])\n for clr in self.lookup_table:\n csvwriter.writerow([clr.hex.to_str(), clr.id, clr.name])", "def make_csv(file_of_data):\n with open(file_of_data, 'w') as f:\n writer = csv.writer(f)\n header = (\"Counter\", \"Date/time\", \"Latitude\", \"Longitude\", \"Temperature\", \"Humidity\")\n writer.writerow(header)", "def create_csv_file(self):\r\n # Create a new csv-file\r\n with open(self.fname, 'w') as f:\r\n writer = csv.writer(f, dialect='excel')\r\n writer.writerow(['set_time',\r\n 'read_time_P_ac',\r\n 'read_time_P_bat',\r\n 'soc',\r\n 'set_value',\r\n 'P_ac',\r\n 'P_bat'])", "def to_csv(self, path):\n for table in ['datasets', 'dataruns', 'hyperpartitions', 'classifiers']:\n df = pd.read_sql('SELECT * FROM %s' % table, self.session.bind)\n df.to_csv(os.path.join(path, '%s.csv' % table), index=False)", "def __create_output_csv(self, df, score_list, elapsed_list):\n df['Similar']=score_list\n df['Elapsed']=elapsed_list\n df.to_csv('Output.csv',index=False)\n return df", "def write_csv(self, file):\n # Write header row\n file.write('Timestamp,MessageType,Queue,Price,Volume,OrderID\\n')\n # Write content\n for x in self.records:\n row = (str(x[0]) + ',' + x[1][\"MessageType\"] + ',' +\n x[1][\"Queue\"] + ',' + str(x[1][\"Price\"]) + ',' +\n str(x[1][\"Volume\"]) + ',' + str(x[1][\"OrderID\"]) + '\\n')\n file.write(row)", "def write_csv(estimates: ListOfDicts, output_csv: str) -> None:\n with open(output_csv, \"w\") as f:\n writer = csv.DictWriter(f, fieldnames=estimates[0].keys())\n writer.writeheader()\n for row in estimates:\n writer.writerow(row)\n logging.info(f\"Wrote estimates as {output_csv}\")", "def write_to_csv(self, data_points):\n keys = data_points[0].keys()\n with open(self.report_path, 'w') as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(data_points)", "def writeMatrix(outfile, matrix,\n format=\"full\",\n separator=\"\\t\",\n value_format=\"%f\",\n row_headers=None,\n col_headers=None):\n\n if format == \"full\":\n if col_headers:\n outfile.write(separator + separator.join(col_headers) + \"\\n\")\n\n for x in range(0, matrix.shape[0]):\n if row_headers:\n outfile.write(row_headers[x] + separator)\n outfile.write(\n string.join(map(lambda x: value_format % x, matrix[x, ]), separator) + \"\\n\")\n\n elif format == \"phylip\":\n if not row_headers:\n raise \"phylip output 
requires row headers.\"\n\n nrows = len(row_headers)\n outfile.write(\"%i\\n\" % nrows)\n\n for x in range(0, nrows):\n outfile.write(row_headers[x] + separator)\n outfile.write(\n separator.join([value_format % y for y in matrix[x, ]]) + \"\\n\")\n\n else:\n raise \"unknown output format %s\" % output_format", "def sparse_matrix_to_csv(filename: str,\n X: scipy.sparse.csr_matrix,\n halve_ratings=False):\n data, rows, cols = X.data, *X.nonzero()\n with open(filename, mode='w') as file:\n file_matrix = csv.writer(file,\n delimiter=',',\n quotechar='\"',\n quoting=csv.QUOTE_MINIMAL)\n for rating, user_id, movie_id in zip(data, rows, cols):\n # restore ratings to their original scale\n if halve_ratings:\n rating = rating / 2.\n file_matrix.writerow([user_id, movie_id, rating])", "def write_as_csv(self,destination=sys.stdout):\n # write sorted\n the_destination=None\n if isinstance(destination,types.FileType):\n the_destination=destination\n elif isinstance(destination,types.StringTypes):\n the_destination=file(destination,\"w\")\n else:\n raise Exception(\"sorry destination %s is not valid\"%(repr(destination)))\n\n the_destination.write(\"# quantity:\"+str(self.quantity_name))\n the_destination.write(\"# x y ysigma n\\n\")\n for x in self.get_xdata():\n y=UserDict.UserDict.__getitem__(self,x)\n if type(y) is types.FloatType:\n the_destination.write(\"%g %g 0 1\\n\"%(x,y)) \n else:\n the_destination.write(\"%g %g %g %d\\n\"%(x,y.mean(),y.mean_sigma(),y.n))\n\n the_destination=None", "def write_csv(self, file: str, table: str, libref: str =\"\", nosub: bool =False, dsopts: dict = None, opts: dict = None) -> 'The LOG showing the results of the step':\n dsopts = dsopts if dsopts is not None else {}\n opts = opts if opts is not None else {}\n\n code = \"filename x \\\"\"+file+\"\\\";\\n\"\n code += \"options nosource;\\n\"\n code += \"proc export data=\"\n\n if len(libref):\n code += libref+\".\"\n\n code += \"'\"+table.strip()+\"'n \"+self._sb._dsopts(dsopts)+\" outfile=x dbms=csv replace; \"\n code += self._sb._expopts(opts)+\" run\\n;\"\n code += \"options source;\\n\"\n\n if nosub:\n print(code)\n else:\n ll = self.submit(code, \"text\")\n return ll['LOG']", "def writecsvfile(filename, columnnames, data):\n with open(filename, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n writer.writerow(columnnames) # header row\n for row in data:\n writer.writerow(row[:])", "def table_to_csv(output_table, cat_column, method, out_csv_names, debug):\n p_df = df_to_pandas(output_table)\n no_of_prod = len(p_df)\n head_df = pd.DataFrame()\n head_df[\"Cluster Name\"] = p_df.reset_index()[cat_column]\n head_df_list = head_df[\"Cluster Name\"].tolist()\n try:\n cluster_matrix = hierarical_clustering(p_df, method)\n except Exception as e:\n raise Exception(\"Distance matrix has some issue:\"+str(e))\n # head_df.sort(\"Cluster Name\", inplace=True) # original\n head_df = head_df.sort_values([\"Cluster Name\"]) # changed by mukul\n head_df[\"Cluster Number\"] = range(1, no_of_prod + 1)\n head_df = change_column_order(head_df, \"Cluster Number\", 0)\n p_df = pd.DataFrame(cluster_matrix, columns=[\"Idj1\", \"Idj2\", \"SemipartialRSq\", \"priority\"])\n p_df[\"NumberOfClusters\"] = range(len(p_df),0,-1)\n p_df = format_column(p_df, \"Idj1\", no_of_prod, \"NumberOfClusters\")\n p_df = format_column(p_df, \"Idj2\", no_of_prod, \"NumberOfClusters\") \n p_df.drop(\"priority\", axis=1, inplace=True)\n p_df = change_column_order(p_df, \"NumberOfClusters\", 0)\n if not debug:\n 
p_df.to_excel(out_csv_names[0], index=False)\n head_df.to_excel(out_csv_names[1], index=False)\n return head_df, p_df, head_df_list, cluster_matrix", "def export_table (self,_w):\n try:\n _data = \"\"\n maxRow = _w.rowCount()\n maxColumn = _w.columnCount()\n for hc in range(0,maxColumn):\n try: _hci = str(_w.horizontalHeaderItem(hc).text())\n except:_hci=\"None\";pass\n if hc == (maxColumn-1) :_data += _hci\n elif hc < maxColumn:_data += \"%s,\" % _hci\n _data += \"\\n\"\n for r in range(0, maxRow):\n for c in range(0, maxColumn):\n _d = str(_w.item(r, c).text())\n if c == (maxColumn-1):_data += _d\n elif c < maxColumn:_data += \"%s,\" % _d\n _data += \"\\n\"\n options = QFileDialog.Options()\n saved_file, _ = QFileDialog.getSaveFileName(self, \"Save Table to file \", \"data\", \"Plain Text (*.txt);;CSV (*.csv);;All Files (*)\", options=options)\n _file = open(saved_file, 'w')\n _file.write(_data)\n _file.close()\n except FileNotFoundError:pass", "def save_csv(filename, rows):\n with open(filename, 'w', newline='', encoding='utf-8') as f:\n writer = csv.writer(f)\n writer.writerow([\n 'title', 'runtime', 'genre(s)', 'director(s)', 'writer(s)',\n 'actor(s)', 'rating(s)', 'number of rating(s)'\n ])\n\n writer.writerows(rows)", "def write_tocsv(file_name, dataframe) :\n print(\"\\nSaved result to {}\\n\".format(file_name))\n dataframe.to_csv(file_name, mode='a', header=False,index=False)", "def write_to_file(self, results):\n with open(self.outputFilename, \"w\") as csvFile:\n csvWriter = csv.writer(csvFile, delimiter=',') \n title_row = ('asset_id', 'component_id', 'latitude', 'longitude', 'installation_date', 'commissioning_date', 'street_name', 'cabinet_id', 'nominal_wattage', 'current_time', 'current_LogValue', 'current_IsLogValueOff') \n csvWriter.writerow(title_row)\n for record in results:\n csvWriter.writerow(record)", "def column_output(self, output_fname, list_2d):\n\t\toutf = open(output_fname, 'w')\n\t\twriter = csv.writer(outf, delimiter='\\t')\n\t\tfor list in list_2d:\n\t\t\twriter.writerow(list)\n\t\tdel writer\n\t\toutf.close()", "def writeCSV(filename):\n if not filename.endswith('.csv'):\n filename += '.csv'\n with open(filename, 'wb') as csvfile:\n filewriter = csv.writer(csvfile, delimiter=',',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n for x in range(numRows):\n scores = quizScores()\n types = getTypes(quizScores())\n extra = []\n filewriter.writerow([x] + types + scores + extra)", "def csv(self, section=\"main\", column_headers=True):\n table_end = \"\\r\\n\\r\\n\\r\\n\"\n table_head_pos = self.header_dictionary[section]\n table_end_pos = self.raw_data[table_head_pos:].find(table_end)\n if table_end_pos == -1:\n table_end_pos = len(self.raw_data)\n else:\n table_end_pos += table_head_pos\n if column_headers:\n a = \",\".join(self.columns()) + \"\\n\"\n else:\n a = \"\"\n return a + self.raw_data[table_head_pos:table_end_pos].replace(\"\\t\",\",\").replace(\"\\r\",\"\")", "def write_csv(data):\n\n with open('data.csv', 'w') as file:\n writer = csv.DictWriter(file, fieldnames=data[0].keys())\n writer.writeheader()\n for row in data:\n writer.writerow(row)", "def write_csv(self, out_file_name, header):\n\n with open(out_file_name, 'wb') as outf:\n writer = csv.writer(outf, quoting=csv.QUOTE_ALL)\n writer.writerow(header)\n writer.writerows(self.records)", "def write_tsv(self, filename):\n f = open(filename,'wb')\n wr = csv.writer(f,delimiter='\\t',quoting=csv.QUOTE_ALL)\n colrow = []\n for col in self.cols:\n colrow.append('<undefined>' if len(col) == 0 else 
unicode(iter(col).next()).encode('unicode-escape'))\n wr.writerow(colrow)\n for row in self.data:\n strrow = []\n for cell in row:\n strrow.append('' if cell is None else unicode(cell).encode('unicode-escape'))\n wr.writerow(strrow)\n f.close()", "def write_csv(self, filename, cutoff=2):\n f = csv.writer(open(filename, 'wb'))\n for row in self.rows(cutoff=cutoff):\n f.writerow(row)", "def print_matrix_to_file(matrix, fileName):\n with open(fileName, 'w') as f:\n for row in matrix:\n print('\\t'.join(map(str, row)), file=f)", "def MatrixToFile(self):\n # open text file\n file = open(\"intersection_matrix.txt\", 'w')\n # write opening square bracket for matrix\n file.write(\"[\")\n # use for loop to write in the matrix\n for i in range(self.rows):\n # square brackets to append in elements of a row of the matrix\n mat = []\n if i != 0:\n # separate each row with a comma\n file.write(\",\")\n for j in range(self.cols):\n # append elements of the row\n mat.append(self.matrix[i][j])\n # avoid having space as the first row in the text file\n if i != 0:\n file.write(\"\\n\")\n # write in the row\n file.write(str(mat))\n # write closing bracket for the matrix\n file.write(\"]\")\n # close file\n file.close()\n return", "def write_csv(outputfile, delimiter, newline, qchar, encoding, header, rows):\n with open(outputfile, 'w', newline=newline, encoding=encoding) as csvfile:\n writer = csv.writer(csvfile, delimiter=delimiter,\n quotechar=qchar, quoting=csv.QUOTE_MINIMAL)\n writer.writerow(header)\n for row in rows:\n writer.writerow(row)", "def write_csv(self):\n with open(paths.CSV_FILE, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file)\n assg = AssignmentConfig().get_assignment()\n writer.writerow([\"Student\"] + assg.get_test_list() + assg.get_programs_list() +\n [\"normalised_test_score\"] + [\"normalised_prog_score\"] + [\"total\"] + [\"total_rounded\"])\n\n for (submitter, submitter_data) in sorted(self.snapshot['results'].items()):\n total_score = submitter_data[\"normalised_test_score\"] + submitter_data[\"normalised_prog_score\"]\n total_rounded = round(total_score * 2) / 2 # total score rounded to nearest 0.5\n writer.writerow([submitter] +\n [submitter_data[\"tests\"][test] for test in sorted(submitter_data[\"tests\"])] +\n [submitter_data[\"progs\"][prog] for prog in sorted(submitter_data[\"progs\"])] +\n [submitter_data[\"normalised_test_score\"]] +\n [submitter_data[\"normalised_prog_score\"]] +\n [round(total_score, 2)] +\n [total_rounded])", "def toCsv(self, csv_path):\n ser = pd.Series(self)\n ser.to_csv(csv_path)", "def write_table_data(tableId: str, tableRows: list):\n filename = f'table_{tableId}.csv'\n with open(filename, 'w', newline='', encoding='utf-8') as f_:\n csv.writer(f_, quoting=csv.QUOTE_NONNUMERIC).writerows(tableRows)\n return filename", "def create_csv(self):\n try:\n # Convert List of Lists to DataFrame and write it to a CSV\n pd.DataFrame(self.data, columns=self.header) \\\n .to_csv(os.path.join(self.file_path, self.file_name), index=False)\n self.successful_run = True\n except:\n # TODO create Exception Handling\n raise", "def writetoCSV(self, fileName):\n\n with open(fileName, 'w') as writeFile:\n writeFile.write(\"ID,Fx,Fy,Fz\\n\")\n for fstnr in F:\n writeFile.write(str(fstnr.ID))\n for i in fstnr.force:\n writeFile.write(',' + str(i))\n writeFile.write('\\n')", "def to_csv_file_obj(self, rows):\n output = StringIO.StringIO()\n writer = csv.writer(output)\n writer.writerows(rows)\n return output", "def generate_csv(fh, num_rows=1, num_columns=1, 
delimiter=','):\n df = pandas.DataFrame(np.random.randn(num_rows, num_columns))\n df.to_csv(path_or_buf=fh, sep=delimiter)", "def export_csv(self, outpath):\n\n\t\tself.df.to_csv(outpath)", "def writeDataCSV(data,outpath,outfile,out_head=None,message='data'):\n if (out_head is not None):\n #nhead = out_head.count(',') + 1\n nhead = len(out_head.split(',')) # Split header at every comma\n if (data.shape[1] != nhead):\n print('Warning: No. of fields does not match number of headings in', \n 'output file:',outfile+'.csv')\n print('No. fields =',data.shape[1],', No. headings =',nhead)\n filename = join(outpath, outfile + '.csv')\n print('Saving',message,'in file:',filename)\n np.savetxt(filename,data,delimiter=',',header=out_head) \n return None", "def write_csv(self, _dict, filename):\n with open(filename, 'w') as f:\n f.write('\"'+'\";\"'.join(_dict.keys())+'\"\\n')\n for i in np.arange(len(_dict[list(_dict.keys())[0]])):\n values = []\n for col in _dict.keys():\n try:\n values.append(str(_dict[col][i]))\n except IndexError as e:\n # LSTM don't have first times available because of lacking history\n pass\n f.write(';'.join(values)+'\\n')\n\n logging.info('Wrote {}'.format(filename))\n self._upload_to_bucket(filename, filename)", "def write_csv(table: Table, file: str, header: Sequence[str] = None) -> None:\n fieldnames = list(table[0].keys())\n for hdr in reversed(header):\n if hdr in fieldnames:\n fieldnames.remove(hdr)\n fieldnames.insert(0, hdr)\n\n with open(file, \"w\", encoding=\"utf-8-sig\", errors=\"replace\", newline=\"\") as csvfile:\n writer = DictWriter(csvfile, fieldnames=fieldnames)\n\n writer.writeheader()\n for row in table:\n writer.writerow(row)", "def write_csv(elongation, file_name):\n e = elongation\n\n with open(file_name, 'w') as f:\n f.write(f\"\"\"\\\nBreak Load, {e.break_load()}\nBreak Strength, {e.break_strength()}\nBreak Elongation, {e.break_elongation()}\nYield Load, {e.yield_load()}\nYield Strength, {e.yield_strength()}\nYield Elongation, {e.yield_elongation()}\nGauge Length, {e.gauge_length}\nSample Width, {e.sample_width}\nSample Thickness, {e.sample_thickness}\n\nPoints\n %, N\"\"\")\n for x, y in zip(e.xs, e.ys):\n f.write(f'\\n{x:>8.4f}, {y:>8.4f}')", "def write_csv_file(filepath, fieldnames, rows):\n headers = [{'label': field} for field in fieldnames]\n with open(filepath, 'w') as f_buf:\n outfile = CsvWriter()\n outfile.set_headers(headers)\n outfile._datas = rows\n outfile.render(f_buf)", "def write_csv(fn, toCSV):\n keys = toCSV[0].keys()\n with open(fn, 'w') as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(toCSV)", "def _export_csv(x, y, export_to):\r\n\r\n with open(export_to, 'w', newline='') as e:\r\n writer = csv.writer(e, delimiter=',')\r\n for i in range (0, len(x)):\r\n writer.writerow([x[i], y[i]])", "def MaterializeData(self, output_path):\n output_file_name = os.path.join(output_path, self.file_name)\n\n if self.verbose:\n print 'Writing file: %s' % output_file_name\n\n csv_output_file = open(output_file_name, 'wb')\n csv_writer = csv.writer(csv_output_file)\n\n for row in self.table_data:\n csv_writer.writerow(row)\n\n csv_output_file.close()", "def __create_csv(self):\n with open(self.__csv_file_name, 'w', newline='', encoding='utf-8') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=self.__csv_fields, delimiter=';')\n writer.writeheader()", "def writeCSV(csvPath, usedmpicommands, first_table_values,second_table_values,third_table_values, df):\n\n 
print(\"Saving CSV files in directory '\" + os.path.realpath(csvPath) +\"'\")\n\n #routine Summary by rank metrics table\n metric_csv_table = df.to_csv(sep=';')\n with open(os.path.join(csvPath,'routineSummaryByRank_metric_table.csv'), 'w') as outfileMetricTable:\n outfileMetricTable.write(metric_csv_table)\n outfileMetricTable.close()\n\n #routine Summary by rank data table (just the data from the instrumenation file in csv format)\n with open(os.path.join(csvPath,'routineSummaryByRank_summary.csv'), 'w') as outfileMPICommands:\n wr = csv.writer(outfileMPICommands, delimiter=';')\n wr.writerows(usedmpicommands)\n outfileMPICommands.close()\n\n #application Summary by rank data (first table)\n #Columns: \"Rank\",\"Proc CPU Time\",\"User Portion\", \"User Portion in Percent\", \"System Portion\", \"System Portion in Percent\"\n with open(os.path.join(csvPath,'applicationSummaryByRank_1st_table.csv'), 'w') as outfile_first_table:\n wr = csv.writer(outfile_first_table, delimiter=';')\n wr.writerow([\"Rank\",\"Proc CPU Time\",\"User Portion\", \"User Portion in Percent\", \"System Portion\", \"System Portion in Percent\"])\n wr.writerows(first_table_values)\n outfile_first_table.close()\n \n #application Summary by rank data (second table) \n #Columns: \"Rank\",\"Proc Wall Time\",\"User\" , \"User in Percent\",\"MPI\", \"MPI in Percent\"\n with open(os.path.join(csvPath,'applicationSummaryByRank_2st_table.csv'), 'w') as outfile_second_table:\n wr = csv.writer(outfile_second_table, delimiter=';')\n wr.writerow([\"Rank\",\"Proc Wall Time\",\"User\" , \"User in Percent\",\"MPI\", \"MPI in Percent\"])\n wr.writerows(second_table_values)\n outfile_second_table.close()\n\n #application Summary by rank data (third table)\n #Columns: \"Rank\",\"Proc MPI Time\",\"Overhead\", \"Overhead in Percent\",\"Blocking\", \"Blocking in Percent\"\n with open(os.path.join(csvPath,'applicationSummaryByRank_3rd_table.csv'), 'w') as outfile_third_table:\n wr = csv.writer(outfile_third_table, delimiter=';')\n wr.writerow([\"Rank\",\"Proc MPI Time\",\"Overhead\", \"Overhead in Percent\",\"Blocking\", \"Blocking in Percent\"])\n wr.writerows(third_table_values)\n outfile_third_table.close()\n\n #In case, you are wondering, where the last part of the instrumentation file is (message Summary by rank),\n #it is currently not saved as a csv file. 
This is because:\n #\n #1st: In the platform_mpi instrumentation file, the data is somehow visualized beautifully\n #2nd: It is very hard to save the data in a 2-dimensional csv file format\n #Therefore we decided, not to export this data in a csv file format", "def generate_csv(inf, outf):\n o = csv.writer(outf)\n o.writerow(COLUMNS)\n for row in reformat_data(inf):\n o.writerow([inf.name] + row)", "def produce_solution(y):\n\n with open('out.csv', 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=',', lineterminator=\"\\n\")\n writer.writerow(['id', 'y'])\n for i in range(y.shape[0]):\n writer.writerow([i, y[i]])", "def generate_csv(lists, output_file):\n if os.path.isfile(output_file):\n with open(output_file, 'a') as file:\n dataset = tablib.Dataset()\n for l in lists:\n dataset.append([l['Original ASIN'], l['Associated ASIN'], l['Title'], l['Price'], l['Currency Code'], l['Relationship']])\n file.write(dataset.csv)\n else:\n with open(output_file, 'w+') as fp:\n dataset = tablib.Dataset(headers=['Original ASIN', 'Associated ASIN', 'Title', 'Price', 'Currency Code', 'Relationship'])\n for l in lists:\n dataset.append([l['Original ASIN'], l['Associated ASIN'], l['Title'], l['Price'], l['Currency Code'], l['Relationship']])\n fp.writelines(dataset.csv)", "def to_csv(self):\n if not self._fitted:\n self.fit()\n #self._message(\"Saving results into a csv (comma separated values) file.\")\n v=np.array([list(self.initialConcentration.values()),\n list(self.fitting_error.values()),\n list(self.k.values()),\n list(self.Fb.values()),\n list(self.slope.values())]).T\n k=list(self.initialConcentration.keys())\n d=pd.DataFrame(v,columns=['Initial Concentration','Fitting Error','k','Fb','Slope'],index=k)\n fn=get_valid_fname(self.ID)\n self.csvname=\"%s_initial_concentrations.csv\"%(fn)\n self.fullcsvname=\"%s/%s_initial_concentrations.csv\"%(self.info['resultsdir'],fn)\n self.info['csvname_initialConcentration']=self.csvname\n print(self.csvname)\n d.to_csv('%s/%s'%(self.info['resultsdir'],self.csvname))", "def write(self, data, filename=None):\n if not filename:\n filename = self.output_csv\n\n with open(filename, \"w\") as _file:\n writer = csv.writer(_file)\n\n writer.writerow(list(_ for _ in self.header()))\n writer.writerows(data)", "def save_cosine_matrix_df(cosine_matrix_df, run_parameters):\n new_file_name = kn.create_timestamped_filename(\"cosine_matrix\", \"df\")\n cosine_matrix_df.to_csv(\n os.path.join(run_parameters['results_directory'], new_file_name), header=True, index=True, sep='\\t')", "def write_mrc_matrix(self):\n\n matrix = self.matrix\n \n matrix = self.permute_matrix_to_map_axis_order(matrix)\n a = Numeric.ravel(matrix)\n \n data = a.tostring()\n\n file_write = open(self.path,'ab')\n file_write.write(data)\n file_write.close()", "def export_csv(self):\n outputfile = tkinter.filedialog.asksaveasfilename(\n defaultextension=\".csv\",\n filetypes=((\"comma seperated values\", \"*.csv\"),\n (\"All Files\", \"*.*\")))\n if outputfile:\n tabledata = self.tabs.window.aistracker.create_table_data()\n export.write_csv_file(tabledata, outputfile)\n else:\n raise ExportAborted('Export cancelled by user.')", "def write_csv(row_list,out_name,*header_strings : str):\n with open(out_name,'w',newline='') as result_file:\n wr = csv.writer(result_file, delimiter='\\t')\n if header_strings:\n wr.writerow([name for name in header_strings])\n if type(row_list[0]) is list:\n wr.writerows(row_list)\n else:\n for row in row_list:\n wr.writerow([row])", "def 
matrix_export(request, simulation, demandsegment):\n matrix = demandsegment.matrix\n matrix_couples = Matrix.objects.filter(matrices=matrix)\n # To avoid conflict if two users export a file at the same time, we\n # generate a random name for the export file.\n seed = np.random.randint(10000)\n filename = '{0}/website_files/exports/{1}.tsv'.format(settings.BASE_DIR,\n seed)\n with codecs.open(filename, 'w', encoding='utf8') as f:\n writer = csv.writer(f, delimiter='\\t')\n # Get a dictionary with all the values to export.\n values = matrix_couples.values_list('p__user_id', 'q__user_id', 'r')\n # Write a custom header.\n writer.writerow(['origin', 'destination', 'population'])\n writer.writerows(values)\n with codecs.open(filename, 'r', encoding='utf8') as f:\n # Build a response to send a file.\n response = HttpResponse(f.read())\n response['content_type'] = 'text/tab-separated-values'\n response['Content-Disposition'] = 'attachement; filename=od_matrix.tsv'\n # We delete the export file to save disk space.\n os.remove(filename)\n return response", "def writeToCSV(self, filepath):\r\n\t\twith open(filepath, 'w') as outputFile:\r\n\t\t\toutputFile.write(str(self))", "def a_csv(coords,nums, file_name):\n \n output = pd.DataFrame(coords, columns=['coords'])\n output['nums'] = nums\n\n\n \n output.to_csv(file_name, index = False)\n \n return output", "def save_csv(self, filename: str, type='n', **args):\n if type == 'n':\n df = self.export_nodes()\n else:\n df = self.export_edges()\n df.to_csv(filename, index=False)", "def write_csv(file_name, data):\n\n with open(file_name, \"w\") as fp:\n\n writer = RiscvInstructionTraceCsv(fp)\n writer.start_new_trace()\n\n for entry in data:\n writer.write_trace_entry(entry)", "def write_to_csv(box_regions, num_of_cols, num_of_rows, filename=\"out.csv\"):\r\n\r\n cwriter = csv.writer(open(filename, 'w'))\r\n\r\n digit_list = []\r\n for region in box_regions:\r\n # performing Neural Network algorithm to get predicted digits\r\n digits = performRecognition.get_decimal_in_box(region)\r\n\r\n # sort the digits\r\n digits = sort_digits(digits, key=lambda digit: digit[1][0])\r\n\r\n print (digits)\r\n\r\n digit_str = ''\r\n for digit in digits:\r\n digit_str += str(digit[0])\r\n\r\n digit_list.append(digit_str)\r\n\r\n # writes the digits into the csv\r\n for i in range(0, num_of_rows*num_of_cols, num_of_cols):\r\n cwriter.writerow(digit_list[i:i+num_of_cols])", "def write_file(file):\n file.to_csv('data_set.csv', encoding='utf-8', index=False)", "def save_table(data, out_file):\n logging.info(\"Saving table\")\n #header, data = data\n #out = pd.DataFrame(data=data, columns = header.keys())\n joblib.dump(data, out_file)", "def write_csv(self, file, collapse=False):\n # Write header row\n file.write('Timestamp')\n for x in self.measures:\n file.write(',' + x)\n file.write('\\n')\n\n if collapse:\n last_ts = None\n next_write = None\n # Write content\n for x in self.records:\n if last_ts is None:\n last_ts = x[0]\n next_write = x\n elif x[0] == last_ts:\n next_write = x\n else:\n if next_write is not None:\n self.__write_record(file, next_write)\n last_ts = x[0]\n next_write = x\n if next_write is not None:\n self.__write_record(file, next_write)\n else:\n # Write content\n for x in self.records:\n self.__write_record(file, x)", "def write_csv(\n header,\n contents,\n sep=\",\",\n filename=\"stdout\",\n thousands=False,\n tee=False,\n align=True,\n comment=False,\n):\n from jcvi.formats.base import must_open\n\n formatted = load_csv(header, contents, sep=sep, 
thousands=thousands, align=align)\n if comment:\n formatted[0] = \"#\" + formatted[0][1:]\n formatted = \"\\n\".join(formatted)\n output = must_open(filename, \"w\")\n print(formatted, file=output)\n if tee and filename != \"stdout\":\n print(formatted)", "def write_csv(path, lines, headers):\n print \"Opening %s for score output\" % base_name(path)\n\n try:\n f = open(path, 'wb')\n writer = csv.writer(f)\n writer.writerow(headers)\n writer.writerows(lines)\n except IOError:\n print \"Cannot open %s\" % path\n else:\n print \"Scores successfully written to %s\" % path\n f.close()", "def to_csv(self, filename):\n self.data.to_csv(filename)", "def to_csv(self, filename):\n self.data.to_csv(filename)", "def setup_csv(self) -> None:\n csvData = ['Followers', 'Time']\n\n # Create our CSV file header\n with open(self.graphfile, 'w') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow(csvData)\n csvFile.close()" ]
[ "0.75623345", "0.7330622", "0.71900606", "0.71231455", "0.71107894", "0.7031687", "0.7024952", "0.70113516", "0.697658", "0.69399834", "0.6934993", "0.69087684", "0.69058", "0.6873836", "0.6852782", "0.6852024", "0.6777883", "0.676096", "0.6755106", "0.67167455", "0.6708375", "0.6671856", "0.66679996", "0.6666607", "0.6664351", "0.66559476", "0.6622331", "0.6607635", "0.6579855", "0.6562081", "0.6541949", "0.65414375", "0.65411264", "0.6538029", "0.65349674", "0.6530032", "0.65135086", "0.6511141", "0.64936465", "0.64916646", "0.6486218", "0.6478671", "0.64785576", "0.6449339", "0.6447518", "0.6443942", "0.6411107", "0.6405048", "0.6389545", "0.63860345", "0.6381933", "0.6367273", "0.6358088", "0.6356117", "0.6349757", "0.6346491", "0.6324473", "0.6311612", "0.6308248", "0.63025", "0.6298694", "0.62886333", "0.6287652", "0.62854207", "0.62779176", "0.6277606", "0.6275338", "0.62700856", "0.6269866", "0.6265854", "0.62623847", "0.6261674", "0.62585086", "0.62481546", "0.6245698", "0.6242216", "0.62359315", "0.6231445", "0.6218812", "0.6217436", "0.6207604", "0.62045825", "0.62043303", "0.62042457", "0.61980903", "0.6188524", "0.61872566", "0.61872286", "0.6184116", "0.61828715", "0.6178757", "0.6176401", "0.61646473", "0.6163172", "0.6158536", "0.61581874", "0.61470383", "0.6146132", "0.6146132", "0.61448133" ]
0.76784086
0
Name a city and the country it resides in separated by a comma.
def city_country(city, country): print(f'"{city.title()}, {country.title()}"\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def city_names(city, country):\n place = f\"{city}, {country}\"\n return place.title()", "def city_country(city, country):\n city_and_country = city + ', ' + country\n return city_and_country.title()", "def city_country(city_name, country_name):\n city_country_combo = city_name + ', ' + country_name\n return city_country_combo.title()", "def city_country(city, country):\n return city.title() + \", \" + country.title()", "def city_country(city, country):\r\n\treturn(city.title() + ', ' + country.title())", "def city_country(city, country):\n return(city.title() + \", \" + country.title())", "def city_country(city, country):\n return(city + ', ' + country)", "def city_country(city_name, country_name):\n combi = f\"{city_name.title()}, {country_name.title()}\"\n return combi.title()", "def city_country(city, country):\n c_c = '\"' + city + \", \" + country + '\"'\n return c_c.title()", "def city_country(city, country):\n full_city = city + \", \" + country\n return full_city.title()", "def city_country(city, country):\n pair = f\"{city.title()}, {country.title()}\"\n return pair", "def city_country(city, country):\n place = f\"{city}, {country}\"\n return place.title()", "def city_country(city, country):\n return f\"{city.title()}, {country.title()}\"", "def city_country(city, country):\n return f\"{city.title()}, {country.title()}\"", "def city_country(city, country):\n formatted_city_country = city.title() + \", \" + country.title() + \".\"\n return formatted_city_country", "def city_country(city_name, country):\n formatted_string = f\"{city_name.title()}, {country.title()}\"\n return formatted_string", "def city_country(city, country, population):\n output_string = f\"{city.title()}, {country.title()}\"\n output_string += f\" -population {population}\"\n return output_string", "def describe_city(name, country='united kingdom'):\n print(f\"\\n{name.title()} is in {country.title()}.\")", "def describe_city(city, country='Germany'):\n\tprint(f'{city.title()} is in {country.title()}.')", "def city_country(city, country, population=0):\n if population:\n formatted_city_country = city.title() + \", \" + country.title() + \" - \"\n formatted_city_country += \"population \" + str(population)\n else:\n formatted_city_country = city.title() + \", \" + country.title()\n return formatted_city_country", "def describe_city(city, country='Japan'):\n print(city.title() + \" is in \" + country.title() + \".\")", "def get_city_country(city, country, population=''):\n if population:\n city_country = f\"{city}, {country} - population {population}\"\n else:\n city_country = f\"{city}, {country}\"\n return city_country.title()", "def get_city_country(city, country, population=''):\n if population:\n location = city + ' ' + country + ' ' + str(population)\n return location.title()\n\n else:\n location = city + ' ' + country\n return location.title()", "def describe_city(city, country='canada'):\n print(f\"{city.title()} is in {country.title()}.\")", "def describe_city(city, country='New Zealand'):\n\tprint(f\"\\nThe city of {city} is in {country}.\")", "def get_formatted_location(city, country):\n\tformatted_location = city + \", \" + country\n\treturn formatted_location.title()", "def city_location(city, country, population = None):\n if population:\n full_str = f\"{city.title()}, {country.title()} - population {population}\"\n else:\n full_str = f\"{city.title()}, {country.title()}\"\n return full_str", "def get_formatted_city(city, country, population=None):\n if population:\n city_country = 
f\"{city.title()}, {country.title()} - population {population}.\"\n else:\n city_country = f\"{city.title()}, {country.title()}.\"\n\n return city_country", "def ad_rep_city_state(obj):\n return '%s, %s' % (obj.ad_rep.geolocation_object.us_city.name,\n obj.ad_rep.geolocation_object.us_state.abbreviation)", "def get_city_country(city, country, population=''):\n\tif population:\n\t\tcity_country = city.title() + ', ' + country.title() + \" - population \" + str(population)\n\telse:\n\t\tcity_country = city.title() + ', ' + country.title()\n\treturn city_country", "def get_location(city, country, population=\"\"):\n\tdetails = city.title() + \", \" + country.title()\n\t#if population is specified i.e. not default value, append\n\tif population:\n\t\tdetails += \" - population \" + str(population)\n\t#in either case, return details\n\treturn details", "def test_city_country(self):\n formatted_name = city_country('santiago', 'chile')\n self.assertEqual(formatted_name, 'Santiago, Chile')", "def name_places(self):\n self.city_names = {}\n self.region_names = {}\n for city in self.cities:\n self.city_names[city] = self.lang.name(\"city\")\n for region in np.unique(self.territories):\n self.region_names[region] = self.lang.name(\"region\")", "def test_city_country(self):\n santiago_chile = get_city_name('santiago', 'chile')\n self.assertEqual(santiago_chile, 'Santiago, Chile')", "def get_city_state(self):\n\n city = self.city\n return f'{city.name}, {city.state}'", "def get_cities(self, city_name: str = \"\"):", "def test_city_country(self):\n formatted_name = make_formatted_name('santiago', 'chile')\n self.assertEqual(formatted_name, 'Santiago, Chile')", "def _build_city(db, place):\n location = get_main_location(db, place)\n county = location.get(PlaceType.COUNTY)\n # Build a title description string that will work for Eniro\n city_descr = _build_area(db, place)\n if county:\n city_descr += ', ' + county\n return _strip_leading_comma(city_descr)", "def test_city_country(self):\n formatted_city = get_full_city(\"santiago\", \"chile\")\n self.assertEqual(formatted_city, \"Santiago, Chile\")", "def description(self):\n\t\treturn \"%s, %s\" % (self.name, self.country)", "def get_state_info(city, state, population=''):\n if population:\n city_info = city + ' ' + state + ' - ' + population\n city_info.title()\n else:\n city_info = city + ' ' + state\n city_info.title()\n return city_info", "def get_formatted_place(city, country):\n place = f\"{city} {country}\"\n return place.title()", "def country() -> str:", "def to_location(city: str = None, state: str = None) -> str:\n if city is None and state is None:\n return \"USA\"\n elif city is None or state is None:\n return f\"{city or ''}{state or ''}, USA\"\n return f\"{city}, {state} USA\"", "def test_city_country_population(self):\n formatted_name = make_formatted_name('santiago', 'chile', 5000000)\n self.assertEqual(formatted_name, 'Santiago, Chile - population 5000000')", "def get_cities(self, city_name: str = None):", "def test_city_country(self):\n\t\tformatted_address = city_country('santiago', 'chile')\n\t\tself.assertEqual(formatted_address, 'Santiago, Chile')", "def print_cities(g):\n cities = []\n for key in g.city_dict:\n cities.append(g.city_dict[key].get_name())\n return cities", "def print_cities() -> None:\n city_list = get_cities()\n i = 1\n for a,b,c,d,e in zip(city_list[::5], city_list[1::5], city_list[2::5], city_list[3::5], city_list[4::5]):\n a = str(i) + f\".{a}\"\n i += 1\n b = str(i) + f\".{b}\"\n i += 1\n c = str(i) + 
f\".{c}\"\n i += 1\n d = str(i) + f\".{d}\"\n i += 1\n e = str(i) + f\".{e}\"\n i += 1\n if e == '410.youngstown':\n print(\"{:<20}{:<20}{:<20}{:<20}{:<}\".format(a,b,c,d,e))\n a = str(i) + f\".{city_list[i-1]}\"\n b = str(i+1) + f\".{city_list[i]}\"\n i += 1\n c = c = str(i+1) + f\".{city_list[i]}\"\n print(\"{:<20}{:<20}{:<}\".format(a,b,c))\n else:\n print(\"{:<20}{:<20}{:<20}{:<20}{:<}\".format(a,b,c,d,e))", "def city_parser(city: str = None):\n return city.text.strip().split(',')[1]", "def test_city_country_population(self):\n santiago_chile = get_city_name('santiago', 'chile','5000000')\n self.assertEqual(santiago_chile, 'Santiago, Chile - population 5000000')", "def test_city_country(self):\n santiago_chile = city_country('santiago', 'chile')\n self.assertEqual(santiago_chile, 'Santiago, Chile')", "def test_city_country(self):\n santiago_chile = city_country('santiago', 'chile')\n self.assertEqual(santiago_chile, 'Santiago, Chile')", "def test_city_country(self):\n santiago_chile = city_country('santiago', 'chile')\n self.assertEqual(santiago_chile, 'Santiago, Chile')", "def test_city_country_population(self):\n\t\tformatted_address = city_country(\n\t\t\t'santiago', 'chile', '5000000')\n\t\tself.assertEqual(formatted_address, \n\t\t\t'Santiago, Chile - Population 5000000')", "def update_city_name(name):\r\n if ', WA' or ',WA' in name:\r\n name = name.rstrip (', WA')\r\n return string.capwords(name)", "def get_city(string):\n city = \"\"\n previous_ch = None;\n\n #For each character in string\n for ch in string:\n #break if it is a comma, the city has been completed\n if ch == \",\":\n break\n #if the character is a letter, add it to the \"city\" string\n elif ch.isalpha():\n city += ch\n #if the character is a space, and the previous character is a letter, add the space to the \"city\" string. 
(This prevents duplicate spaces)\n elif ch.isspace() & previous_ch.isalpha():\n city += ch\n\n #update previous character\n previous_ch = ch\n\n return city", "def city(self):\n # type: () -> string_types\n return self._city", "def city(self):\r\n try:\r\n return str(self.connect()['name'])\r\n except:\r\n return '@weather_city'", "def display_name(self):\n if self.name and self.address and self.name != self.address:\n return \", \".join([self.name, self.address])\n return self.name", "def province_names(self):\n countries = self.province.select_related('country__province').order_by(\n 'country__province__province_name')\n\n province_names = []\n\n for country in countries:\n if not country.province:\n continue\n province_name = country.province.province_name\n if province_name not in province_names:\n province_names.append(province_name)\n return ', '.join(province_names)", "def get_country_names(data, world=True):\n if world:\n return ['World'] + data['country_long'].unique().tolist()\n else:\n return data['country_long'].unique().tolist()", "def expected_city_names_fixture():\n return {'b', 'a', 'c'}", "def format_cityname(fname):\n\taus_cities = dict(\n\t\tm = 'melbourne',\n\t\ts ='sydney',\n\t\tc = 'canberra',\n\t\tp ='perth',\n\t\tb ='brisbane',\n\t\th = 'hobart',\n\t\ta = 'adelaide')\n\tif fname in aus_cities.keys():\n\t\treturn aus_cities[fname]\n\telse:\n\t\treturn 'melbourne'", "def _city_code(self, cr, uid, ids, field_name, arg, context=None):\n res = {}\n if context is None:\n context = {}\n for line in self.browse(cr, uid, ids, context=context):\n res[line.id] = len(line.dep) < 3 and \\\n (line.dep + ((3 - len(line.com)) * \"0\") + line.com) or \\\n (line.dep + ((2 - len(line.com)) * \"0\") + line.com)\n return res", "def get_city_info(g, city_name):\n flag = 0\n for key in g.city_dict:\n if(g.city_dict[key].get_name() == city_name):\n print g.city_dict[key].get_info()\n flag = 1\n \n if(flag == 0):\n print (\"Invalid Input\")", "def test_city_country(self):\n dublin_ireland = city_country('dublin', 'ireland')\n self.assertEqual(dublin_ireland, 'Dublin, Ireland')", "def filter_city(input_city: str) -> str:\n # input_city = string.capwords(input_city.lower())\n result = filterString(input_city).cities\n return result", "def _get_countries():\n print('-c, -C [country]\\\n \\n [country]=\\\n \\n AR\\t: Argentina\\\n \\n AT\\t: Austria\\\n \\n BR\\t: Brazil\\\n \\n BY\\t: Belarus\\\n \\n CA\\t: Canda\\\n \\n DE\\t: Germany\\\n \\n FR\\t: France\\\n \\n GB\\t: Great Britain\\\n \\n GH\\t: Ghana\\\n \\n HU\\t: Hungary\\\n \\n ID\\t: Indonesia\\\n \\n IL\\t: Israel\\\n \\n JP\\t: Japan\\\n \\n KR\\t: Korea\\\n \\n MA\\t: Morocco\\\n \\n MY\\t: Malaysia\\\n \\n NL\\t: Netherlands\\\n \\n NO\\t: Norway\\\n \\n OM\\t: Oman\\\n \\n PK\\t: Pakistan\\\n \\n RU\\t: Russia\\\n \\n SA\\t: Saudi Arabia\\\n \\n TH\\t: Thailand\\\n \\n TW\\t: Taiwan\\\n \\n UA\\t: Ukraine\\\n \\n US\\t: United States\\\n \\n UY\\t: Uruguay\\\n \\n VE\\t: Venezuela\\\n \\n VN\\t: Vietnam\\\n \\n .....\\n common usage: opengate -c JP')", "def clean_city(self):\n return self.cleaned_data['city'].strip().title()", "def _derive_country_IE(place):\n derived = []\n if _COUNTY_REGEX.search(place.name):\n stripped = _COUNTY_REGEX.sub(\"\", place.name.lower())\n derived += [\"co \" + stripped, \"county \" + stripped]\n\n #\n # Alternative name cases that aren't as straightforward as the above.\n #\n try:\n derived += {\n \"loch garman\": [\"co wexford\"],\n \"uíbh fhailí\": [\"co offaly\"],\n \"maigh eo\": [\"co 
mayo\"],\n \"an iarmhí\": [\"co westmeath\"],\n }[place.name.lower()]\n except KeyError:\n pass\n\n return [DerivedName(text, \"en\") for text in derived]", "def get_country_details(self,country):\n try:\n country_obj = pycountry.countries.get(name=country)\n if country_obj is None:\n c = pycountry.countries.search_fuzzy(country)\n country_obj = c[0]\n continent_code = pc.country_alpha2_to_continent_code(country_obj.alpha_2)\n continent = pc.convert_continent_code_to_continent_name(continent_code)\n return country_obj.alpha_3, continent\n except:\n if 'Congo' in country:\n country = 'Congo'\n elif country == 'Diamond Princess' or country == 'Laos' or country == 'MS Zaandam' or country == 'Holy See' or country == 'Timor-Leste':\n return country, country\n elif country == 'Korea, South' or country == 'South Korea':\n country = 'Korea, Republic of'\n elif country == 'Taiwan*':\n country = 'Taiwan'\n elif country == 'Burma':\n country = 'Myanmar'\n elif country == 'West Bank and Gaza':\n country = 'Gaza'\n else:\n return country, country\n country_obj = pycountry.countries.search_fuzzy(country)\n continent_code = pc.country_alpha2_to_continent_code(country_obj[0].alpha_2)\n continent = pc.convert_continent_code_to_continent_name(continent_code)\n return country_obj[0].alpha_3, continent", "def parse(self, item: str) -> Tuple[str, str]:\n try:\n city, *_, country = item.split(',')\n except ValueError:\n return '', item\n return city.strip(), country.strip()", "def city(self) -> str:\n return pulumi.get(self, \"city\")", "def seperate_City_Data(data, us_state_abbrev):\n assert data is not None\n dictionary = dict(data)\n keys = dictionary.keys()\n tmp = list(keys)\n values = dictionary.values()\n res = []\n for elem in keys:\n state = elem[1].strip()\n city = elem[0].strip()\n# print(city)\n if state in us_state_abbrev:\n res.append(city)\n return res, list(values)", "def test_city_country_population(self):\n your_location = location_name(\"lviv\", \"ukraine\", \"123\")\n self.assertEqual(your_location, \"Lviv, Ukraine - Population 123\")", "def _derive_country_JP(place):\n derived = []\n if _JP_FU_SUFFIX.search(place.asciiname):\n bare = _JP_FU_SUFFIX.sub(\"\", place.asciiname)\n derived += [bare, bare + \" prefecture\", bare + \" pref\"]\n elif _JP_KEN_SUFFIX.search(place.asciiname):\n bare = _JP_KEN_SUFFIX.sub(\"\", place.asciiname)\n derived += [bare, bare + \" prefecture\", bare + \" pref\",\n bare + \"-ken\", bare + \" ken\"]\n elif _JP_SHI_SUFFIX.search(place.name):\n bare = _JP_SHI_SUFFIX.sub(\"\", place.name)\n derived += [bare, bare + \"-city\", bare + \" city\"]\n elif _JP_KU_SUFFIX.search(place.name):\n bare = _JP_KU_SUFFIX.sub(\"\", place.name)\n derived += [bare, bare + \"-ku\", bare + \" ku\", bare + \" ward\"]\n\n en_names = [DerivedName(text.lower(), \"en\") for text in derived]\n _LOGGER.debug(\"derive_country_JP: en_names: %r\", en_names)\n\n if _JA_JP_SHI_SUFFIX.search(place.name):\n bare = _JA_JP_SHI_SUFFIX.sub(\"\", place.name)\n ja_names = [DerivedName(bare, \"ja\")]\n else:\n ja_names = []\n return en_names + ja_names", "def get_country(self, field_name='COUNTRY'):\n default = self.get_default(field_name)\n if default != '' and default != None:\n return '%s' % str(default).split(',')[-1].strip()\n return ''", "def GetUsCities():\n return GetDataFromCsvFile('us_cities.csv')", "def addCity(self, city):\n if city:\n self.city_lbx.insert(0, str(city).strip())\n self.city_ent.delete(0, \"end\")\n self.city_ent.focus()", "def name():\n fname = input(\"Enter your first 
name:\")\n lname = input(\"Enter your last name:\")\n city = input(\"Enter the city you live in:\")\n state = input(\"Enter the state you live in:\")\n\n fullname = fname+ \" \"+ lname\n city_state = city +\", \" +state\n print(\"Your name is:\",fullname)\n print(\"You live in:\",city_state)", "def city_country_select():\n q = \"\"\"\n SELECT airport_city, airport_country\n FROM airport\n GROUP BY airport_city, airport_country\"\"\"\n cursor = connection.cursor()\n cursor.execute(q)\n city_country = cursor.fetchall()\n cursor.close()\n return city_country", "def important_cities(self,\n cities_path=r\"/mnt/data/shared/important_cities.csv\"):\n\n df_cities = pd.read_csv(cities_path)\n\n for i, name in enumerate(list(df_cities.city)):\n plt.plot(df_cities.long[i], df_cities.lat[i],\n marker=self.city_marker,\n color=self.city_markercolor,\n markersize=self.city_markersize)\n\n plt.annotate(name,\n (df_cities.long[i]+0.03, df_cities.lat[i]),\n fontsize=self.fontsize)", "def __repr__(self) -> str:\n return \"Country(\\\"\" + self.name + \"\\\", \" + str(self.population) + \", \" + str(self.area) + \")\"", "def return_city(n):\n if n == 1:\n return \"San Francisco\"\n elif n == 2:\n return \"Los Angeles\"\n elif n == 3:\n return \"Las Vegas\"\n elif n == 4:\n return \"Portland\"\n elif n == 5:\n return \"San Diego\"\n else:\n return \"Seattle\"", "def city(self):\n\n try:\n city = self.status.place[\"full_name\"].strip(r\",[A-Z ]\")\n except TypeError:\n city = None\n if not city:\n try:\n city = self.metadata.as_dict.get(\"user_city\").get(\"google_geocoding\")\n except (TypeError, AttributeError):\n city = None\n return city", "def account_name(self):\n return self.civic_no_city()", "def create_countries(name_countries,origin='united kingdom',beta=0.2,gamma=0.1,I0=10,Horizon=horizon):\n countries = []\n for country in name_countries:\n if country == origin:\n c = Country(name=country,N=df_countries['population'].loc[country],beta=beta,gamma=gamma,I0=I0,H=Horizon)\n else:\n c = Country(name=country,N=df_countries['population'].loc[country],beta=beta,gamma=gamma,I0=0,H=Horizon)\n countries.append(c)\n return countries", "def save_csv(outfile, cities):\n writer = csv.writer(outfile)\n writer.writerow(['Name'])\n for row in cities:\n writer.writerow([row])", "def _get_information(self):\n grid = self._tab.find(\"div\", class_=\"details grid show\")\n cities = grid.find_all(\"li\", attrs={'data-type': 'city'})\n return [self._get_text(city) for city in cities]", "def GetWorldCities():\n return GetDataFromCsvFile('world_cities.csv')", "def _derive_country_MX(place):\n lname = place.name.lower()\n derived = []\n match = _PARENTHETICAL.search(lname)\n if match:\n derived.append(_PARENTHETICAL.sub(\"\", lname).strip())\n derived.append(match.group(1).strip())\n\n if _MX_COLONIA.search(place.name):\n derived.append(_MX_COLONIA.sub(\"col\", lname))\n\n if _MX_DELEG.search(place.name):\n derived.append(_MX_DELEG.sub(\"delegación\", lname))\n derived.append(_MX_DELEG.sub(\"del\", lname))\n derived.append(_MX_DELEG.sub(\"deleg\", lname))\n\n if _MX_CIUDAD.search(place.name):\n derived.append(_MX_CIUDAD.sub(\"cd\", lname))\n\n alternative_names = _MX_SUPPORT[\"alternative_names\"][\"es\"]\n try:\n derived += alternative_names[lname]\n except KeyError:\n pass\n\n return [DerivedName(text, \"es\") for text in derived]", "def __str__(self):\n # rajon_id = LookupRajon.objects.filter(region_id__regions__contains='Киевская')\n return '%s, %s, %s' % (self.last_name, self.first_name, self.patron_name)", "def 
day_009_2():\n travel_log = [\n {\n \"country\": \"France\",\n \"visits\": 12,\n \"cities\": [\"Paris\", \"Lille\", \"Dijon\"]\n },\n {\n \"country\": \"Germany\",\n \"visits\": 5,\n \"cities\": [\"Berlin\", \"Hamburg\", \"Stuttgart\"]\n },\n ]\n\n def add_new_country(country, visits, cities):\n # Add new information to the travel log\n travel_log.append({\"country\": country, \"visits\": visits, \"cities\": cities})\n\n # Print the new information in readable syntax.\n print(f\"You've visited {country} {visits} times.\")\n\n # Assemble readable output for the list using commas, 'and', and a period.\n output_string = \"\"\n num_cities = len(cities)\n for city in cities:\n if num_cities == len(cities):\n # If this is our first city, just add it to the output string.\n output_string = city\n elif num_cities > 1:\n # If this is not the first city OR the last city, add it with a comma.\n output_string = output_string + f\", {city}\"\n else:\n # If this is our last city, use the word 'and' and end with a period.\n output_string = output_string + f\" and {city}.\"\n num_cities = num_cities - 1\n # Print the cities added to the travel log\n print(f\"You've been to {output_string}\")\n\n add_new_country(\"Russia\", 2, [\"Moscow\", \"Saint Petersburg\"])\n print(travel_log)", "def continents(g):\n continents = []\n for key in g.city_dict:\n if(g.city_dict[key].get_continent() not in continents):\n continents.append(g.city_dict[key].get_continent())\n \n for continent in continents:\n print(\"{}: \").format(continent)\n for key in g.city_dict:\n if(g.city_dict[key].get_continent() == continent):\n print(\" {}\").format(g.city_dict[key].get_name())", "def get_CityName():\n return str(root.find('provincia').text) # root.find('province') returns the direct child 'province' of root. ...\n # ... An equivalent way to get the same result is ( root[3].text ), where ...\n # ... root[2] represents 'province' tag and it's the 4th direct child of root.", "def getLocationString(self):\n street = ' '.join(self.context.getAddress().strip().split())\n # Remove Postfach from street, otherwise Google geocoder API will\n # return wrong results\n street = street.replace('Postfach', '').replace('\\r','').strip()\n zip_code = self.context.getZip()\n city = self.context.getCity()\n country = self.context.getCountry()\n\n # We need at least something other than country to be defined,\n # otherwise we can't do a meaningful geocode lookup\n if not (street or zip_code or city):\n return ''\n\n # Concatenate only the fields with a value into the location string\n location = country\n for field in [city, zip_code, street]:\n if field.strip():\n location = \"%s, %s\" % (field.strip(), location)\n\n return location", "def add_city(g, code, name, country, continent, timezone, coordinates, population, region):\n port = Ports(code, name, country, continent, timezone, coordinates, population, region)\n g.city_dict[code] = port\n g.convert[name] = code \n return g", "def civic_no_city(self):\n return self.civic.replace(', VANCOUVER', '')", "def create_list_csv_by_city(self, file_name, city_name):\n\n #We couldn't make it for this hackathon because we hadn't enough data and especially good data\n pass" ]
[ "0.8305632", "0.8101193", "0.80840385", "0.80514556", "0.8050137", "0.7992045", "0.79826546", "0.7955863", "0.79475707", "0.7865331", "0.7797506", "0.77545387", "0.7729826", "0.7729826", "0.7728276", "0.75599986", "0.75591683", "0.72416097", "0.7220424", "0.71964306", "0.71757674", "0.7167174", "0.6992617", "0.69634086", "0.68880284", "0.688486", "0.685549", "0.680074", "0.6681656", "0.66727906", "0.66279864", "0.66043365", "0.65290487", "0.6499814", "0.6457884", "0.64519346", "0.6436336", "0.6405711", "0.63978297", "0.6385637", "0.63318574", "0.63212186", "0.6299682", "0.6285109", "0.62416834", "0.6231127", "0.61896527", "0.6168752", "0.6116136", "0.60586077", "0.6043037", "0.5939121", "0.5939121", "0.5939121", "0.5859315", "0.58543915", "0.5854095", "0.5780875", "0.57595694", "0.5757546", "0.57521886", "0.57454526", "0.5726508", "0.5698689", "0.56981283", "0.56816983", "0.56815654", "0.56802994", "0.56652653", "0.56518227", "0.56502575", "0.5641653", "0.56414026", "0.5631775", "0.56134325", "0.5588305", "0.5585017", "0.5556762", "0.5536466", "0.553056", "0.5529201", "0.55276346", "0.55263335", "0.54955405", "0.54848266", "0.5476239", "0.5474238", "0.53719974", "0.53454304", "0.53415656", "0.53341204", "0.53309226", "0.5327454", "0.5326166", "0.5316547", "0.5315004", "0.53123516", "0.5307924", "0.53016", "0.5301144" ]
0.760025
15
Construct a DCEL from the output of matplotlib.delaunay.delaunay.
def from_delaunay_triangulation(cls, xl, yl, triangles, circumcentres): def add_containing_face_to_dcel(): containing_face_edges = [edge for edge in dcel.edges if not edge.nxt] edge = containing_face_edges.pop() face = Face(outer_component=None, inner_components=[edge]) dcel.faces.append(face) first_edge = edge previous_edge = [ e for e in containing_face_edges if e.get_destination() == edge.origin ] edge.prev = previous_edge[0] while len(containing_face_edges) > 1: edge.incident_face = face next_edge = [ e for e in containing_face_edges if e.origin == edge.get_destination() ] edge.nxt = next_edge[0] next_edge[0].prev = edge edge = next_edge[0] containing_face_edges.remove(next_edge[0]) edge_2 = containing_face_edges.pop() edge.incident_face = face edge_2.incident_face = face edge_2.prev = edge edge_2.nxt = first_edge edge.nxt = edge_2 def add_triangle_edges(circumcentre): triangles_edges = [] for vertex_idx, origin in enumerate(triangle_vertices): # Destination of the edge in this triangle that has vertex as origin destination = triangle_vertices[(vertex_idx + 1) % 3] edge_1 = HalfEdge(origin) edge_2 = HalfEdge(destination, twin=edge_1) edge_1.twin = edge_2 edge_1 = dcel.add_edge(edge_1) edge_2.twin = edge_1 edge_2 = dcel.add_edge(edge_2) edge_1.twin = edge_2 triangles_edges.append(edge_1) triangle_face = Face(triangles_edges[0], circumcentre=list(circumcentre)) dcel.faces.append(triangle_face) # Set previous and next of the edges for edge_idx, edge in enumerate(triangles_edges): edge.nxt = triangles_edges[(edge_idx + 1) % 3] edge.prev = triangles_edges[(edge_idx + 3 - 1) % 3] edge.incident_face = triangle_face triangle_vertices[edge_idx].incident_edge = edge dcel = cls() for t_idx, t in enumerate(triangles): triangle_vertices = [ dcel.add_vertex(Vertex(x)) for x in du.get_triangle_vertices(xl, yl, t) ] add_triangle_edges(circumcentres[t_idx]) add_containing_face_to_dcel() return dcel
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _mesh(self):\n from scipy.spatial import Delaunay\n points = self.cluster.get_positions()\n delaunay = Delaunay(points)\n simplices = self._filter_max_dist_in_element(delaunay.simplices)\n delaunay.simplices = simplices\n return delaunay", "def __plot_delaunay(self, ax=None) -> None:\n for simplex in self.hull.simplices:\n ax.plot(self.points[simplex, 0], self.points[simplex, 1], \"r-\")\n\n tri = Delaunay(self.points)\n ax.triplot(self.points[:, 0], self.points[:, 1], tri.simplices.copy(), lw=1)", "def _get_diff_dc(self):\n self._diff_dc = tuple(encode_differential(self.data[:, 0, 0]))", "def _DeRedden(lam,flux,ra,dec,dustmap_path='/Users/vzm83/Softwares/sfddata-master'): \n m = sfdmap.SFDMap(dustmap_path) \n flux_unred = pyasl.unred(lam,flux,m.ebv(ra,dec))\n return flux_unred", "def delaunay_lattice_from_pts(xy, trimbound=True, target_z=-1, max_bond_length=-1, thres=4.0, zmethod='random',\n minimum_bonds=-1, check=False):\n NP = len(xy)\n tri = Delaunay(xy)\n TRI = tri.vertices\n\n # check\n # plt.triplot(xy[:,0], xy[:,1], TRI, 'go-')\n # plt.show()\n\n BL = TRI2BL(TRI)\n NL, KL = BL2NLandKL(BL, NP=NP, NN='min')\n\n if trimbound:\n # Cut unnatural edge bonds (ones that are long and skinny)\n NL, KL, BL, TRI = delaunay_cut_unnatural_boundary(xy, NL, KL, BL, TRI, thres)\n\n # check\n if check:\n plt.clf()\n plt.triplot(xy[:, 0], xy[:, 1], TRI, 'go-')\n plt.show()\n\n # Cut bonds longer than max allowed length\n if max_bond_length > 0:\n print 'Cutting bonds longer than max_bond_length...'\n BL = cut_bonds(BL, xy, max_bond_length)\n if check:\n display_lattice_2D(xy, BL, title='In delaunay_lattice_from_pts(), removed long bonds.')\n NL, KL = BL2NLandKL(BL, NN='min')\n\n if minimum_bonds > 0:\n # Remove any points with no bonds\n print 'Removing points without any bonds...'\n if minimum_bonds == 1:\n keep = KL.any(axis=1)\n else:\n keep = np.sum(KL, axis=1) > minimum_bonds\n # keep = np.array([np.count_nonzero(KL[i]) > minimum_bonds for i in range(len(KL))])\n xy, NL, KL, BL, PVxydict = remove_pts(keep, xy, BL, NN='min')\n if check:\n display_lattice_2D(xy, BL, NL=NL, KL=KL, title='In delaunay_lattice_from_pts(), removed pts without bonds.')\n\n # Cut bonds to tune average coordination\n if target_z > 0:\n print 'Cutting bonds to tune average coordination...'\n if zmethod == 'random':\n NL, KL, BL = cut_bonds_z_random(xy, NL, KL, BL, target_z)\n elif zmethod == 'highest':\n NL, KL, BL = cut_bonds_z_highest(xy, NL, KL, BL, target_z)\n\n print 'Constructing BM...'\n BM = NL2BM(xy, NL, KL)\n\n if check:\n display_lattice_2D(xy, BL, NL=NL, KL=KL, title='Checking output lattice in delaunay_lattice_from_pts()')\n # vc = cc[:,tri.neighbors]\n # # kill edges at infinity, plotting those would need more work...\n # vc[:,tri.neighbors == -1] = np.nan\n #\n # lines = []\n # lines.extend(zip(cc.T, vc[:,:,0].T))\n # lines.extend(zip(cc.T, vc[:,:,1].T))\n # lines.extend(zip(cc.T, vc[:,:,2].T))\n return xy, NL, KL, BL, BM", "def Deboucle3D(*args):\n return _BRepAlgo.BRepAlgo_Tool_Deboucle3D(*args)", "def ProteinDelaunay(pdbid, chain):\n Data = []\n Head = ['PDBID', 'Quad', 'SortedQuad', 'RedAlpha', 'SortRedAlpha', 'V1', 'V2', 'V3', 'V4', 'L1', 'L2', 'L3', 'L4',\n 'L5', 'L6', 'SumL', 'AvgL', 'DevL', 'DevTetra', 'Vol', 'TF1', 'TF2', 'TF3', 'TF4', 'SumTF', 'AvgTF', 'hullArea', 'hullVolume']\n Data.append(Head)\n \n pointcloud, bf, resname = PointCloudData(pdbid, chainid)\n print \"Given PDB ID: \", pdbid\n print \"Given Chain ID:\", chain\n print \"Number of C-alpha points: \", len(pointcloud)\n\n # 
Convex Hull.\n ConvxHull = ConvexHull(pointcloud)\n hullArea = round(ConvxHull.area, 4)\n hullVolume = round(ConvxHull.volume, 4)\n\n # Delaunay Tessellation\n delaunay_hull = Delaunay(pointcloud, furthest_site=False, incremental=False, qhull_options='Qc') # noqa E501\n delaunay_points = delaunay_hull.points\n delaunay_vertices = delaunay_hull.vertices\n delaunay_simplices = delaunay_hull.simplices\n delaunay_neighbors = delaunay_hull.neighbors\n print \"Number of Delaunay Simplices: \", len(delaunay_simplices)\n\n for i in delaunay_vertices:\n\n # Obtain the indices of the vertices.\n one, two, three, four = i[2], i[1], i[3], i[0]\n\n # Obtain the coordinates based on the indices.\n cordA = pointcloud[one]\n cordB = pointcloud[two]\n cordC = pointcloud[three]\n cordD = pointcloud[four]\n\n # Get three letter amino acid names based on indices.\n a = resname[one]\n b = resname[two]\n c = resname[three]\n d = resname[four]\n\n # Get the temprature factors for the amino acids.\n a_tf = bf[one]\n b_tf = bf[two]\n c_tf = bf[three]\n d_tf = bf[four]\n\n # Get the string of three letter amino acids\n # forming the vertices of the tetrahedra.\n amino = [a, b, c, d]\n sortAmino = sorted(amino)\n amino = '-'.join(amino)\n sortAmino = '-'.join(sortAmino)\n\n # Get one letter code of the amino acids\n oneA = amino_dict.replace_all(a, amino_dict.one_letter)\n oneB = amino_dict.replace_all(b, amino_dict.one_letter)\n oneC = amino_dict.replace_all(c, amino_dict.one_letter)\n oneD = amino_dict.replace_all(d, amino_dict.one_letter)\n oneLet = [oneA, oneB, oneC, oneD]\n sortOneLet = sorted(oneLet)\n oneLet = ''.join(oneLet)\n sortOneLet = ''.join(sortOneLet)\n\n # Get Reduced Amino Acid Representations.\n flpA = amino_dict.replace_all(oneA, amino_dict.FLP)\n flpB = amino_dict.replace_all(oneB, amino_dict.FLP)\n flpC = amino_dict.replace_all(oneC, amino_dict.FLP)\n flpD = amino_dict.replace_all(oneD, amino_dict.FLP)\n flp = [flpA, flpB, flpC, flpD]\n sortflp = sorted(flp)\n flp = (''.join(flp)).upper()\n sortflp = (''.join(sortflp)).upper()\n\n # Calculate distances between the tetrahedra vertices.\n AB = np.linalg.norm(cordA - cordB)\n AC = np.linalg.norm(cordA - cordC)\n AD = np.linalg.norm(cordA - cordD)\n BC = np.linalg.norm(cordB - cordC)\n BD = np.linalg.norm(cordB - cordD)\n CD = np.linalg.norm(cordC - cordD)\n\n # Calculate the tetrahedra Volume.\n A_prime = cordA - cordD\n B_prime = cordB - cordD\n C_prime = cordC - cordD\n primes = [A_prime, B_prime, C_prime]\n primes = np.asarray(primes)\n det = np.linalg.det(primes)\n Vol = round((abs(det) / 6), 4)\n\n # Sum of Edge Lengths.\n SumL = (AB + AC + AD + BC + BD + CD)\n SumL = round(SumL, 4)\n\n # Average Edge Lengths.\n AvgL = round((SumL / 6), 4)\n\n # Deviation in Edge Lengths.\n devLp = (AB - AvgL) ** 2\n devLq = (AC - AvgL) ** 2\n devLr = (AD - AvgL) ** 2\n devLs = (BC - AvgL) ** 2\n devLt = (BD - AvgL) ** 2\n devLu = (CD - AvgL) ** 2\n devLy = [devLp, devLq, devLr, devLs, devLt, devLu]\n sumDevL = sum(devLy)\n DevL = round(math.sqrt(sumDevL / 6.0), 4)\n\n # Deviation in Tetrahedrality\n lenArr = [AB, AC, AD, BC, BD, CD]\n DevT = DevTetra(lenArr)\n\n # Sum and Average Temperature Factors.\n SumTF = round((a_tf + b_tf + c_tf + d_tf), 4)\n AvgTF = round(SumTF / 4, 4)\n\n # Data List\n line = [pdbid, oneLet, sortOneLet, flp, sortflp, one, two, three, four, AB, AC, AD, BC, BD, CD, SumL, AvgL, DevL, DevT, Vol, a_tf, b_tf, c_tf, d_tf, SumTF, AvgTF, hullArea, hullVolume]\n Data.append(line)\n\n ## Get coordinates based on the vertices.\n ## 
vertices_coords store the x, y, z coordinates for the delaunay_vertices.\n vertices_coords = pointcloud[delaunay_vertices]\n ## delaunay_indices store the indices for the delaunay_points.\n delaunay_indices = np.arange(len(delaunay_points))\n\n ## Get ready for mayavi plot.\n fig = mlab.figure(1, bgcolor=(0, 0, 0))\n fig.scene.disable_render = True\n ## Get a 3d scatter plot for the delaunay_points.\n mlab.points3d(delaunay_points[:,0], delaunay_points[:,1], delaunay_points[:,2], scale_factor=0.40, color=(0.99, 0.00, 0.00))\n ion_c_alpha_scatter = mlab.pipeline.scalar_scatter(delaunay_points[:,0], delaunay_points[:,1], delaunay_points[:,2], delaunay_indices)\n ion_c_alpha_delaunay = mlab.pipeline.delaunay3d(ion_c_alpha_scatter)\n ion_c_alpha_edges = mlab.pipeline.extract_edges(ion_c_alpha_delaunay)\n mlab.pipeline.surface(ion_c_alpha_edges, colormap='winter', opacity=0.4)\n mlab.savefig(pdbid + '_MayaviViz.x3d')\n mlab.show()\n return Data", "def get_dem(myhuc, sources):\n logging.info(\"\")\n logging.info(\"Preprocessing DEM\")\n logging.info(\"==========================\")\n logging.info(\"downloading DEM\")\n\n # load shapefiles for the HUC of interest\n logging.info(\"loading HUC %s\"%myhuc)\n profile, huc = sources['HUC'].load_huc(myhuc)\n assert(profile['crs']['init'] == 'epsg:4269') # latlong\n\n dem_profile, dem = workflow.clip.clip_dem(huc, sources['DEM'])\n dem = dem[0,:,:] # only the first band\n return dem_profile, dem", "def get_dnde(spectrum,energies):\n energies=units.tonumpy(energies,units.MeV)\n dnde=SpectrumPlotter.get_dnde_mev(spectrum,energies)\n return units.tosympy(dnde,units.ph/units.cm**2/units.s/units.MeV)", "def detrend(xyz_csv, in_dem, aoi_shp):\n\n print('Detrending DEM...')\n detrended_dem = detrend_that_raster(xyz_csv=xyz_csv, in_dem=in_dem, aoi_shp=aoi_shp)\n print('Done')\n print('Detrended DEM @ %s' % detrended_dem)", "def build_delaunay(coords, trim_dist='percentile_size', perc=99, return_dist=False):\n\n # pairs of indices of neighbors\n pairs = Voronoi(coords).ridge_points\n\n if trim_dist is not False:\n dist = distance_neighbors(coords, pairs)\n if not isinstance(trim_dist, (int, float)):\n trim_dist = find_trim_dist(dist=dist, method=trim_dist, nb_nodes=coords.shape[0], perc=perc)\n pairs = pairs[dist < trim_dist, :]\n return pairs", "def dem(\n bounds, bounds_crs, dst_crs, out_file, resolution, interpolation, verbose, quiet\n):\n verbosity = verbose - quiet\n configure_logging(verbosity)\n if not dst_crs:\n dst_crs = \"EPSG:3005\"\n bcdata.get_dem(\n bounds,\n out_file=out_file,\n src_crs=bounds_crs,\n dst_crs=dst_crs,\n resolution=resolution,\n interpolation=interpolation,\n )", "def Decoupler(data,decoupled_name,list_to_decouple=None,decimals=False):\n list_dec = parameters.outputs if list_to_decouple is None else copy.copy(list_to_decouple)\n # Get the arrays of mH, mA ordered as in the outputs\n list_rest = [i for i in data.columns if i not in list_dec] # All but outputs\n n_weights = len(list_dec)\n mHmA = np.empty((0,2))\n for ol in list_dec:\n if decimals:\n arr = np.array([[float(re.findall(r\"\\d*\\.\\d+|\\d+\", ol)[0]),float(re.findall(r\"\\d*\\.\\d+|\\d+\", ol)[1])]])\n else:\n arr = np.array([[int(re.findall(r'_\\d+', ol)[0].replace('_','')),int(re.findall(r'_\\d+', ol)[1].replace('_',''))]])\n mHmA = np.append(mHmA,arr,axis=0)\n # Get the numpy arrays #\n decouple = data[list_dec].values\n repeat = data[list_rest].values\n\n # Repeat and decouple #\n repeat = Repeater(repeat,n_weights)\n masses = np.tile(mHmA,(data.shape[0],1))\n 
decouple = decouple.flatten()\n\n # Concatenate and make DF #\n new_arr = np.c_[repeat,masses,decouple]\n df = pd.DataFrame(new_arr,columns=list_rest+['mH_MEM','mA_MEM',decoupled_name])\n\n return df", "def __init__(self,num_pores=None,domain_size=None,**kwargs):\n super(Delaunay,self).__init__(**kwargs)\n if (num_pores and domain_size) is None:\n num_pores = 1\n domain_size = [1.0,1.0,1.0]\n else:\n self.generate(num_pores,domain_size)", "def native(self) -> \"Grid2DIterate\":\r\n return Grid2DIterate(\r\n values=self,\r\n mask=self.mask,\r\n fractional_accuracy=self.fractional_accuracy,\r\n sub_steps=self.sub_steps,\r\n store_native=True,\r\n )", "def make_dhdu(ham, controls, derivative_fn):\n\n dHdu = []\n for ctrl in controls:\n dHdu.append(derivative_fn(ham, ctrl['symbol']))\n\n return dHdu", "def build_dcel(self):\r\n\r\n # Step 1: vertex list creation\r\n for v in self.vl:\r\n self.vertices.append(Vertex(v[0], v[1]))\r\n\r\n # Step 2: hedge list creation. Assignment of twins and\r\n # vertices\r\n\r\n for e in self.el:\r\n if e[0] >= 0 and e[1] >= 0:\r\n h1 = Hedge(self.vertices[e[0]],\r\n self.vertices[e[1]])\r\n h2 = Hedge(self.vertices[e[1]], self.vertices[e[0]])\r\n h1.twin = h2\r\n h2.twin = h1\r\n self.vertices[e[1]].hedgelist.append(h1)\r\n self.vertices[e[0]].hedgelist.append(h2)\r\n self.hedges.append(h2)\r\n self.hedges.append(h1)\r\n else:\r\n print(\"oh shit boi wadup\")\r\n\r\n # Step 3: Identification of next and prev hedges\r\n for index, v in enumerate(self.vertices):\r\n v.sort_incident()\r\n l = len(v.hedgelist)\r\n if l < 2:\r\n raise DcelError(\"Badly formed dcel: less than two hedges in vertex:\" + str(index))\r\n else:\r\n for i in range(l - 1):\r\n v.hedgelist[i].nexthedge = v.hedgelist[i + 1].twin\r\n v.hedgelist[i + 1].prevhedge = v.hedgelist[i]\r\n v.hedgelist[l - 1].nexthedge = v.hedgelist[0].twin\r\n v.hedgelist[0].prevhedge = v.hedgelist[l - 1]\r\n\r\n # Step 4: Face assignment\r\n provlist = self.hedges[:]\r\n nf = 0\r\n nh = len(self.hedges)\r\n\r\n while nh > 0:\r\n h = provlist.pop()\r\n nh -= 1\r\n # We check if the hedge already points to a face\r\n if h.face == None:\r\n f = Face()\r\n nf += 1\r\n # We link the hedge to the new face\r\n f.wedge = h\r\n f.wedge.face = f\r\n # And we traverse the boundary of the new face\r\n while not h.nexthedge is f.wedge:\r\n h = h.nexthedge\r\n h.face = f\r\n self.faces.append(f)\r\n # And finally we have to determine the external face\r\n for f in self.faces:\r\n f.external = f.area() < 0", "def __repr__(self):\n return (\n '<DCEL ('\n 'vertices:\\n {obj.vertices},\\n'\n 'edges:\\n {obj.edges},\\n'\n 'faces:\\n {obj.faces}>'.format(obj=self)\n )", "def dem_generation(lastoolsdir, lidardir, ground_poly, cores, units_code, keep_orig_pts, coarse_step,\n coarse_bulge, coarse_spike, coarse_down_spike,\n coarse_offset, fine_step, fine_bulge, fine_spike,\n fine_down_spike, fine_offset, aoi_shp,\n dem_resolution, dem_method, tri_meth, void_meth):\n\n # We carry input spatial ref over from the above process, but we should still convert from shp to ref object\n print('Processing LiDAR to remove vegetation points...')\n las_folder = lidardir + '\\\\las_files\\\\'\n process_lidar(lastoolsdir + '\\\\', las_folder, ground_poly, cores, units_code, keep_orig_pts,\n coarse_step,\n coarse_bulge, coarse_spike, coarse_down_spike,\n coarse_offset, fine_step, fine_bulge, fine_spike,\n fine_down_spike, fine_offset)\n print('Done')\n\n print('Generating a %sm resolution DEM...' 
% dem_resolution)\n dem = lidar_to_raster(lidardir, ground_poly, aoi_shp, dem_method, tri_meth, void_meth,\n m_cell_size=float(dem_resolution))\n print('Done')\n\n print('Generating hillshade raster for the DEM...')\n hill_out = lidardir + '\\\\hillshade.tif'\n arcpy.HillShade_3d(dem, hill_out)\n print('Done')", "def create_from_hdu(cls, hdu, ebins):\n hpx = HPX.create_from_hdu(hdu, ebins)\n colnames = hdu.columns.names\n cnames = []\n if hpx.conv.convname == 'FGST_SRCMAP_SPARSE':\n pixs = hdu.data.field('PIX')\n chans = hdu.data.field('CHANNEL')\n keys = chans * hpx.npix + pixs\n vals = hdu.data.field('VALUE')\n nebin = len(ebins)\n data = np.zeros((nebin, hpx.npix))\n data.flat[keys] = vals\n else:\n for c in colnames:\n if c.find(hpx.conv.colstring) == 0:\n cnames.append(c)\n nebin = len(cnames)\n data = np.ndarray((nebin, hpx.npix))\n for i, cname in enumerate(cnames):\n data[i, 0:] = hdu.data.field(cname)\n\n return cls(data, hpx)", "def from_dict(data, decomposer_=None, rx=None, ax=None):\n if decomposer_ is not None:\n decomposer = decomposer_\n else:\n decomposer = SOAPDecomposer(**data[\"decomposer\"])\n \n result = SOAPVector(data[\"P\"], decomposer)\n result.dcP = data[\"dcP\"]\n result.dnP = data[\"dnP\"]\n if rx is not None and data[\"rx\"] is None:# pragma: no cover\n result.rx = rx\n else:\n result.rx = data[\"rx\"]\n if ax is not None and data[\"ax\"] is None:# pragma: no cover\n result.ax = ax\n else:\n result.ax = data[\"ax\"]\n \n if data[\"cRDF\"] is not None:\n result.cRDF = DF(data[\"dcP\"], True, result.rx, decomposer,\n calculate=False)\n result.cRDF.df = data[\"cRDF\"]\n if data[\"nRDF\"] is not None:\n result.nRDF = DF(data[\"dnP\"], False, result.rx, decomposer,\n calculate=False)\n result.nRDF.df = data[\"nRDF\"]\n if data[\"cADF\"] is not None:\n result.cADF = DF(data[\"dcP\"], True, result.ax, decomposer,\n calculate=False)\n result.cADF.df = data[\"cADF\"]\n if data[\"nADF\"] is not None:\n result.nADF = DF(data[\"dnP\"], False, result.ax, decomposer,\n calculate=False)\n result.nADF.df = data[\"nADF\"]\n\n return result", "def get_dekosky(self) -> pd.DataFrame:\n return pd.read_feather(self.figure_data_paths.dekosky_vh12_path)", "def loadData(fname='Unstra.out2.00008.athdf'):\n #data=ath.athdf(fname,quantities=['B1','B2','B3'])\n time,data=ath.athdf(fname,quantities=['Bcc1'])\n bx = data['Bcc1']\n time,data=ath.athdf(fname,quantities=['Bcc2'])\n by = data['Bcc2']\n time,data=ath.athdf(fname,quantities=['Bcc3'])\n bz = data['Bcc3']\n x = data['x1f']\n y = data['x2f']\n z = data['x3f']\n # refinement\n rfac = 1.0\n ##if bx.shape[0] < 512:\n ## nz,ny,nx = bx.shape\n ## rfac = int(512/bx.shape[0])\n ## bx = np.repeat(bx,rfac,axis=0)\n ## bx = np.repeat(bx,rfac,axis=1)\n ## bx = np.repeat(bx,rfac,axis=2)\n ## by = np.repeat(by,rfac,axis=0)\n ## by = np.repeat(by,rfac,axis=1)\n ## by = np.repeat(by,rfac,axis=2)\n ## bz = np.repeat(bz,rfac,axis=0)\n ## bz = np.repeat(bz,rfac,axis=1)\n ## bz = np.repeat(bz,rfac,axis=2)\n # ---\n def curl(vx,vy,vz,dx,dy,dz):\n [dzvx,dyvx,dxvx] = np.gradient(vx)\n [dzvy,dyvy,dxvy] = np.gradient(vy)\n [dzvz,dyvz,dxvz] = np.gradient(vz)\n cx = dyvz/dy-dzvy/dz\n cy = dzvx/dz-dxvz/dx\n cz = dxvy/dx-dyvx/dy\n # No need to del the reference by one manually\n # allow python to perform its own garbage collection\n # after the function return cxyz\n #del dzvx\n #del dzvy\n #del dzvz\n return cx,cy,cz\n # ---\n dx = dz = (x[1]-x[0])/rfac\n dy = (y[1]-y[0])/rfac\n jx,jy,jz = curl(bx,by,bz,dx,dy,dz)\n j2 = jx**2+jy**2+jz**2\n return j2", "def 
from_pyradex(self, integrated_flux, mol_data, line_width=1.0 * u.km / u.s,\n escapeProbGeom='lvg', iter=100,\n collider_density={'H2': 900*2.2}):\n\n try:\n import pyradex\n except ImportError:\n raise ImportError('Pyradex not installed. Please see \\\n https://github.com/keflavich/pyradex/blob/master/INSTALL.rst')\n\n if not isinstance(mol_data, Phys):\n raise ValueError('mol_data must be a `sbpy.data.phys` instance.')\n\n register('Production Rates', {'Radex': '2007A&A...468..627V'})\n\n # convert mol_tag JPLSpec identifier to verbose name if needed\n try:\n mol_data['lamda_name']\n name = mol_data['lamda_name'][0]\n name = name.lower()\n except KeyError:\n if not isinstance(mol_data['mol_tag'][0], str):\n cat = JPLSpec.get_species_table()\n mol = cat[cat['TAG'] == mol_data['mol_tag'][0]]\n name = mol['NAME'].data[0]\n name = name.lower()\n else:\n name = mol_data['mol_tag'][0]\n name = name.lower()\n\n # try various common instances of molecule names and check them against LAMDA before complaining\n try:\n Lamda.molecule_dict[name]\n except KeyError:\n try_name = \"{}@xpol\".format(name)\n try:\n Lamda.molecule_dict[try_name]\n name = try_name\n except KeyError:\n print('Molecule name {} not found in LAMDA, module tried {} and also\\\n found no molecule with this identifier within LAMDA. Please\\\n enter LAMDA identifiable name using mol_data[\"lamda_name\"]\\\n . Use Lamda.molecule_dict to see all available options.'.format(name, try_name))\n raise\n\n # define Temperature\n temp = mol_data['temp']\n\n # check for optional values within mol_data\n if 'temp_back' in mol_data:\n tbackground = mol_data['temp_back']\n else:\n tbackground = 2.730 * u.K\n\n # define cdensity and iteration parameters\n cdensity = mol_data['cdensity'].to(1 / (u.cm * u.cm))\n cdensity_low = cdensity - (cdensity*0.9)\n cdensity_high = cdensity + (cdensity*9)\n # range for 400 iterations\n cdensity_range = np.linspace(cdensity_low, cdensity_high, iter)\n fluxes = []\n column_density = []\n\n with tempfile.TemporaryDirectory() as datapath:\n for i in cdensity_range:\n R = pyradex.Radex(column=i, deltav=line_width,\n tbackground=tbackground, species=name,\n temperature=temp, datapath=datapath,\n escapeProbGeom=escapeProbGeom,\n collider_densities=collider_density)\n\n table = R()\n\n # find closest matching frequency to user defined\n indx = (np.abs(table['frequency']-mol_data['t_freq'])).argmin()\n radexfreq = table['frequency'][indx]\n # get table for that frequency\n values = table[table['frequency'] == radexfreq]\n # use eq in io.f from Pyradex to get integrated flux in K * km/s\n int_flux_pyradex = 1.0645 * values['T_B'] * line_width\n\n fluxes.append(int_flux_pyradex)\n column_density.append(i)\n\n # closest matching integrated flux from pyradex\n\n fluxes = np.array(fluxes)\n\n index_flux = (\n np.abs(fluxes-integrated_flux.to(u.K * u.km / u.s).value)).argmin()\n\n # corresponding column density in 1/cm^2\n column_density = column_density[index_flux]\n print('Closest Integrated Flux:{}'.format(\n fluxes[index_flux] * u.K * u.km / u.s))\n print('Given Integrated Flux: {}'.format(integrated_flux))\n\n return column_density", "def BRepAlgo_Tool_Deboucle3D(*args):\n return _BRepAlgo.BRepAlgo_Tool_Deboucle3D(*args)", "def __init__(self,points,prescribedValues,PDEMatrix=np.eye(2),functionRHS=lambda x: 0):\n \n self.functionRHS= functionRHS\n\n #referenceElement holds the points of the reference element from which all other elements\n #are calculated\n self.referenceElement = np.array([[0,0],[1.,0],[0,1.]])\n\n 
#Calculate a delaunay triangulation of the input points\n self.triangulation = Delaunay(points)\n\n #Uses to initiate the stiffness matrix and the Rhs with the correct size\n self.numberDOF = np.size(self.triangulation.points[:,0])\n\n #is the biggest side of the triangulation\n self.maxDiam = 0\n\n self.prescribedValues = [] \n if self.checkPrescribedValues(prescribedValues):\n self.prescribedValues = prescribedValues\n else:\n print(\"Error: Prescribed Value index not an integer\")\n #the 3 linear Basis funtctions on the reference triangle\n #each has the value 1 at one points and 0 at the other points\n #Numbering of the vertices according to self.referenceElement\n self.linearBasis = []\n self.linearBasis.append(lambda x : 1-x[0]-x[1])\n self.linearBasis.append(lambda x : x[0])\n self.linearBasis.append(lambda x : x[1])\n\n #gradients of the basis functions on a reference triangle\n self.gradBasis = []\n self.gradBasis.append(np.array([-1.,-1])) \n self.gradBasis.append(np.array([1.,0]))\n self.gradBasis.append(np.array([0,1.]))\n\n #Holds integral of two basisfunctons over one reference triangle\n self.elementaryBasisMatrix = 1.0/12*np.array([[1.,0.5,0.5],[0.5,1.,0.5],[0.5,0.5,1.]])\n\n #initiate Righthandside with zeros\n self.rightHandSide = np.zeros(self.numberDOF)\n \n #strong form of PDE is: div(A dot grad(u)) = f, where A is PDEMatrix\n self.PDEMatrix= PDEMatrix", "def __call__(self, jd):\n e = self.ephemeris.earth(jd)\n tpos_au, tvel_au_per_d = self._position_and_velocity(jd)\n t = Barycentric(e.position.au + tpos_au,\n e.velocity.au_per_d + tvel_au_per_d,\n jd)\n t.geocentric = False # test, then get rid of this attribute\n t.rGCRS = tpos_au\n t.vGCRS = tvel_au_per_d\n t.topos = self\n t.ephemeris = self.ephemeris\n t.altaz_rotation = self._altaz_rotation(jd)\n return t", "def show_derivative(self):\n for trace in self.plotWidget.plotDataItems:\n dt = float(trace.attrs['dt'])\n dtrace = np.diff(trace.data)\n x = pgplot.make_xvector(dtrace, dt)\n self.plotWidget.plot(x, dtrace, pen=pg.mkPen('r'))", "def calculate_derivative(current_data):\n\n # Declare array with first value equals zero to build gradient\n derivative_data = [0]\n derivative_data = np.gradient(current_data[\"4. 
close\"])\n # Add gradient values as column to current dataframe\n current_data[\"gradient\"] = derivative_data\n\n return current_data", "def _lindblad_driven(H, rho0, c_ops=None, e_ops=None, Nt=1, dt=0.005, t0=0.,\n return_result=True):\n\n def calculateH(t):\n\n Ht = H[0]\n\n for i in range(1, len(H)):\n Ht += - H[i][1](t) * H[i][0]\n\n return Ht\n\n nstates = H[0].shape[-1]\n\n if c_ops is None:\n c_ops = []\n if e_ops is None:\n e_ops = []\n\n\n # initialize the density matrix\n rho = rho0.copy()\n rho = rho.astype(complex)\n\n\n\n t = t0\n\n if return_result == False:\n\n f_dm = open('den_mat.dat', 'w')\n fmt_dm = '{} ' * (nstates**2 + 1) + '\\n'\n\n f_obs = open('obs.dat', 'w')\n fmt = '{} '* (len(e_ops) + 1) + '\\n'\n\n for k in range(Nt):\n\n t += dt\n\n Ht = calculateH(t)\n\n rho = rk4(rho, liouvillian, dt, Ht, c_ops)\n\n # dipole-dipole auto-corrlation function\n #cor = np.trace(np.matmul(d, rho))\n\n # take a partial trace to obtain the rho_el\n # compute observables\n observables = np.zeros(len(e_ops), dtype=complex)\n\n for i, obs_op in enumerate(e_ops):\n observables[i] = obs_dm(rho, obs_op)\n\n f_obs.write(fmt.format(t, *observables))\n\n\n f_obs.close()\n f_dm.close()\n\n return rho\n\n else:\n\n rholist = [] # store density matries\n\n result = Result(dt=dt, Nt=Nt, rho0=rho0)\n\n observables = np.zeros((Nt, len(e_ops)), dtype=complex)\n\n for k in range(Nt):\n\n t += dt\n\n Ht = calculateH(t)\n\n rho = rk4(rho, liouvillian, dt, Ht, c_ops)\n\n rholist.append(rho.copy())\n\n observables[k, :] = [obs_dm(rho, op) for op in e_ops]\n\n\n result.observables = observables\n result.rholist = rholist\n\n return result", "def hp2deca(hp):\n return DECAngle(hp2dec(hp))", "def itkIsoDataThresholdCalculatorHDUC_cast(obj: 'itkLightObject') -> \"itkIsoDataThresholdCalculatorHDUC *\":\n return _itkIsoDataThresholdCalculatorPython.itkIsoDataThresholdCalculatorHDUC_cast(obj)", "def convert_hdu_to_ldac(hdu):\n from astropy.io import fits\n import numpy as np\n tblhdr = np.array([hdu.header.tostring(',')])\n col1 = fits.Column(name='Field Header Card', array=tblhdr, format='13200A')\n cols = fits.ColDefs([col1])\n tbl1 = fits.BinTableHDU.from_columns(cols)\n tbl1.header['TDIM1'] = '(80, {0})'.format(len(hdu.header))\n tbl1.header['EXTNAME'] = 'LDAC_IMHEAD'\n tbl2 = fits.BinTableHDU(hdu.data)\n tbl2.header['EXTNAME'] = 'LDAC_OBJECTS'\n return (tbl1, tbl2)", "def decompose(self):\r\n dummy = self.ortho()\r\n dummy.setRow(3,_vec4(0.0, 0.0, 0.0, 1.0))\r\n\r\n x = dummy.getColumn(0)\r\n y = dummy.getColumn(1)\r\n z = dummy.getColumn(2)\r\n xl = x.length()\r\n yl = y.length()\r\n zl = z.length()\r\n scale = _vec3(xl,yl,zl)\r\n \r\n x/=xl\r\n y/=yl\r\n z/=zl\r\n dummy.setColumn(0,x)\r\n dummy.setColumn(1,y)\r\n dummy.setColumn(2,z)\r\n if dummy.determinant()<0.0:\r\n dummy.setColumn(0,-x)\r\n scale.x=-scale.x\r\n\r\n return (_vec3(self.mlist[3], self.mlist[7], self.mlist[11]),\r\n dummy,\r\n scale)", "def dudz(self, z):\n raise NotImplementedError", "def dema(client, symbol, range=\"6m\", col=\"close\", periods=None):\n if periods is None:\n periods = [30]\n periods = tolist(periods)\n\n df = client.chartDF(symbol, range)\n\n build = {col: df[col].values}\n for per in periods:\n build[\"ema-{}\".format(per)] = t.DEMA(df[col].values.astype(float), per)\n return pd.DataFrame(build)", "def convert_data (data_taxi,density):\n \n n_trips = len(data_taxi)\n \n min_longitude = min(min(list(data_taxi.loc[:,'pickup_longitude'])),\n min(list(data_taxi.loc[:,'dropoff_longitude'])))\n max_longitude = 
max(max(list(data_taxi.loc[:,'pickup_longitude'])),\n max(list(data_taxi.loc[:,'dropoff_longitude'])))\n min_latitude = min(min(list(data_taxi.loc[:,'pickup_latitude'])),\n min(list(data_taxi.loc[:,'dropoff_latitude'])))\n max_latitude = max(max(list(data_taxi.loc[:,'pickup_latitude'])),\n max(list(data_taxi.loc[:,'dropoff_latitude'])))\n \n e_longitude = max_longitude - min_longitude\n \n e_latitude = max_latitude - min_latitude\n \n scale =np.sqrt( n_trips/( e_longitude* e_latitude * density) )\n\n taxis = []\n \n for i in range(n_trips):\n selected_taxi = data_taxi.iloc[i]\n departure = [int((selected_taxi.pickup_longitude - min_longitude) * scale),\n int((selected_taxi.pickup_latitude - min_latitude) * scale),\n ]\n \n arrival = [\n int((selected_taxi.dropoff_longitude - min_longitude) * scale),\n int((selected_taxi.dropoff_latitude - min_latitude) * scale)]\n \n taxis.append(taxi(departure,arrival,departure))\n return taxis,int(scale*(e_latitude))+1,int(scale*(e_longitude))+1", "def derivcd4(vals, dx):\n deriv = []\n for i in range(2):\n deriv.append((-3*vals[i] + 4*vals[i+1] - vals[i+2]) / (2*dx))\n for i in range(2, len(vals) - 2):\n deriv.append((-1*vals[i-2] + 8*vals[i-1] + 8*vals[i+1] -\\\n vals[i+2]) / (12*dx))\n # Note that due to the fact that this function has been set up this\n # way, this will not output a value at 5000000\n if i % 500000 == 0:\n print('Derivative list: {}'.format(i))\n for i in range((len(vals) - 2), len(vals)):\n deriv.append((vals[i] - vals[i-1]) / dx)\n return deriv", "def _detrend_all(self, Z, dx, Lmax, datmin_grid):\n [nt, ny, nx] = Z.shape\n z = np.empty((nt, ny, nx), 'float')\n z_f = np.empty((nt, ny, nx//2 + 1), 'complex')\n\n for t in range(nt):\n for y in range(ny):\n z[t,y], z_f[t,y] = self._detrend_xsect(Z[t,y].copy(), dx, Lmax, datmin_grid)\n\n return z, z_f", "def __init__(self, dset, grid=None):\n self.grid = xgcm.Grid(dset) if grid is None else grid\n self.coords = dset.coords.to_dataset().reset_coords()\n self.dset = dset\n self.terms = None\n \n # self.dset = dset.reset_coords(drop=True)\n # self.volume = dset.drF * dset.hFacC * dset.rA\n \n self.BCx = 'periodic' if self.grid.axes['X']._periodic else 'fill'\n self.BCy = 'periodic' if self.grid.axes['Y']._periodic else 'fill'", "def dpdnc(x, y):\n ed = ED.EdData(x, y)\n dpdnc_ed(x, y, ed)\n ed.complete_path()\n ed.complete_reverse()\n ed.complete_indexes()\n return ed", "def STL_decomposition(series,\r\n title,\r\n test = False,\r\n savepath = False,\r\n savename = False,\r\n ylabel = \"Battery Level (%)\",\r\n xlabel = \"Date\",\r\n dates = False,\r\n ):\r\n assert isinstance(series, np.ndarray), \"Series is not a numpy array.\"\r\n \r\n Result = STL(series, \r\n period=24, \r\n seasonal=7, \r\n trend=None, \r\n low_pass=None,\r\n seasonal_deg=0, \r\n trend_deg=0, \r\n low_pass_deg=0, \r\n robust=False,\r\n seasonal_jump=1, \r\n trend_jump=1, \r\n low_pass_jump=1).fit()\r\n\r\n if test == False:\r\n _plot_decomposition(Result,\r\n title,\r\n savepath = savepath,\r\n savename = savename,\r\n ylabel = \"Value\",\r\n xlabel = \"Date\",\r\n dates = False,\r\n )\r\n \r\n return Result", "def df_dX(self, dL_df, X):\r\n raise NotImplementedError", "def __init__(self, eulers_list):\n self.__dpi = 150\n self.__title = \"default\"\n self.__data = eulers_list\n self.__plane_list = [[0, 0, 1]]\n self.__is_literal = True # whether to use permutation to get a family of planes\n self.__lattice_vector = np.array([1.0, 1.0, 1.0]) # most simple case as default\n self.__output = \"pdf\"\n self.__clr_list = 
None\n self.__ref = np.eye(3) # matrix used to define xtal unit cell in reference configuration\n # set up pyplot\n self.__fig = plt.figure()\n self.__fig.add_subplot(111, aspect='equal')\n self.__fig.gca().add_artist(plt.Circle((0, 0), 1, color='k', fill=False))\n self.__unique_marker = False\n plt.plot([-1, 1], [0, 0], c=\"k\")\n plt.plot([0, 0], [-1, 1], c=\"k\")\n plt.gca().set_xlim((-1.15, 1.15))\n plt.gca().set_ylim((-1.15, 1.15))\n plt.gca().axes.get_xaxis().set_visible(False)\n plt.gca().axes.get_yaxis().set_visible(False)", "def makePlot(ra, dec, date=None, name=None, figsize=(6.,6.), dpi=80, s=50, center=None, airmass=True, moon=True, des=True):\n #figsize=(10.5,8.5)\n if date is None: date = ephem.now()\n if type(date) != ephem.Date:\n date = ephem.Date(date)\n\n observatory = utils.ctio()\n observatory.date = date\n \n #fig, ax = plt.subplots(fig='ortho', figsize=FIGSIZE, dpi=DPI)\n #fig = plt.figure('ortho')\n #ax = plt.subplots(figure=fig, figsize=FIGSIZE, dpi=DPI)\n fig = plt.figure(name, figsize=figsize, dpi=dpi)\n\n ra_zenith, dec_zenith = observatory.radec_of(0, '90') # RA and Dec of zenith\n ra_zenith = np.degrees(ra_zenith)\n dec_zenith = np.degrees(dec_zenith)\n\n # Zenith position\n #lon_zen = LMC_RA; lat_zen = LMC_DEC\n lon_zen = ra_zenith; lat_zen = dec_zenith\n\n # Create the basemap\n proj_kwargs = dict(projection='ortho', celestial=True)\n if center is None:\n lon_0, lat_0 = -lon_zen, lat_zen # Center position\n else:\n lon_0, lat_0 = center[0], center[1]\n\n proj_kwargs.update(lon_0=lon_0, lat_0=lat_0)\n #print proj_kwargs\n print(proj_kwargs)\n basemap = DECamBasemap(**proj_kwargs)\n\n parallels = np.arange(-90.,120.,30.)\n basemap.drawparallels(parallels)\n meridians = np.arange(0.,420.,60.)\n basemap.drawmeridians(meridians)\n\n if des: drawDES(basemap)\n if airmass: drawAirmassContour(basemap, observatory, 2., s=s)\n if moon: drawMoon(basemap, date)\n plt.title('%s UTC'%(datestring(date)))\n \n drawTarget(basemap, ra, dec)\n\n #return fig, ax, basemap\n return fig, basemap", "def _lindblad(H, rho0, c_ops, e_ops=None, Nt=1, dt=0.005, return_result=True):\n\n # initialize the density matrix\n rho = rho0.copy()\n rho = rho.astype(complex)\n\n if e_ops is None:\n e_ops = []\n\n t = 0.0\n # first-step\n # rho_half = rho0 + liouvillian(rho0, h0, c_ops) * dt2\n # rho1 = rho0 + liouvillian(rho_half, h0, c_ops) * dt\n\n # rho_old = rho0\n # rho = rho1\n if return_result == False:\n\n # f_dm = open('den_mat.dat', 'w')\n # fmt_dm = '{} ' * (nstates**2 + 1) + '\\n'\n\n f_obs = open('obs.dat', 'w')\n fmt = '{} '* (len(e_ops) + 1) + '\\n'\n\n for k in range(Nt):\n\n # compute observables\n observables = np.zeros(len(e_ops), dtype=complex)\n\n for i, obs_op in enumerate(e_ops):\n observables[i] = obs_dm(rho, obs_op)\n\n t += dt\n\n # rho_new = rho_old + liouvillian(rho, h0, c_ops) * 2. 
* dt\n # # update rho_old\n # rho_old = rho\n # rho = rho_new\n\n rho = rk4(rho, liouvillian, dt, H, c_ops)\n\n # dipole-dipole auto-corrlation function\n #cor = np.trace(np.matmul(d, rho))\n\n # take a partial trace to obtain the rho_el\n\n\n f_obs.write(fmt.format(t, *observables))\n\n\n f_obs.close()\n # f_dm.close()\n\n return rho\n\n else:\n\n rholist = [] # store density matries\n\n result = Result(dt=dt, Nt=Nt, rho0=rho0)\n\n observables = np.zeros((Nt, len(e_ops)), dtype=complex)\n\n for k in range(Nt):\n\n t += dt\n rho = rk4(rho, liouvillian, dt, H, c_ops)\n\n rholist.append(rho.copy())\n\n\n observables[k, :] = [obs_dm(rho, op) for op in e_ops]\n\n\n result.observables = observables\n result.rholist = rholist\n\n return result", "def get_daily_data():\n class C:\n pass\n\n def get_ticker(ticker):\n vals = []\n\n datafile = cbook.get_sample_data('%s.csv' % ticker, asfileobj=False)\n\n lines = open(datafile).readlines()\n for line in lines[1:]:\n vals.append([float(val) for val in line.split(',')[1:]])\n\n M = array(vals)\n c = C()\n c.open = M[:, 0]\n c.high = M[:, 1]\n c.low = M[:, 2]\n c.close = M[:, 3]\n c.volume = M[:, 4]\n return c\n c1 = get_ticker('intc')\n c2 = get_ticker('msft')\n return c1, c2", "def get_dema(data):\n if data is None:\n raise EmptyDataError('[!] Invalid data value')\n\n result = TA.DEMA(data)\n if result is None:\n raise IndicatorException\n return result", "def __init__(self, radec, zabs, vlim, NHI, **kwargs):\n # NHI\n if NHI < 20.3:\n raise ValueError(\"This is not a DLA! Try an LLS (or SLLS)\")\n # vlim\n if vlim is None:\n vlim = [-500., 500.]*u.km/u.s\n # Generate with type\n IGMSystem.__init__(self, 'DLA', radec, zabs, vlim, NHI=NHI, **kwargs)\n\n # Other\n self.ZH = 0.", "def __init__(self, dzdt, v, e, D):\n self.V = dzdt\n self.v = v\n self.e = e\n self.D = D\n self.laminar_label = \"Laminar\"\n self.critical_label = \"Critical\"\n self.turbulent_label = \"Turbulent\"", "def prepare_data(lc=None, rv=None):\n\n if lc is None and rv is None:\n raise ValueError(\"You must either specify a light curve, \"\n \"or radial velocity data! 
(or both)\")\n\n data_kw = {}\n t_ref = None\n\n if lc is not None:\n\n # Convert to parts per thousand\n mask = np.isfinite(lc.flux) & np.isfinite(lc.flux_err)\n y = (lc.flux[mask] / np.median(lc.flux[mask]) - 1) * 1e3\n yerr = lc.flux_err[mask] * 1e3\n\n t = lc.astropy_time.tcb.jd[mask]\n t_ref = BKJD_OFFSET\n t = t - t_ref\n\n data_kw['flux_t'] = t\n data_kw['flux_ppt'] = y\n data_kw['flux_ppt_err'] = yerr\n\n if rv is not None:\n if t_ref is None:\n t_ref = rv.t.tcb.jd\n\n data_kw['rv_t'] = rv.t.tcb.jd - t_ref\n data_kw['rv_mps'] = rv.rv.to_value(u.m/u.s)\n data_kw['rv_mps_err'] = rv.rv_err.to_value(u.m/u.s)\n\n data_kw['t_ref_jd'] = t_ref\n\n return RawData(**data_kw)", "def dardar2era(dardar, ERA, p_grid):\n lon_d = dardar.get_data('longitude')\n lat_d = dardar.get_data('latitude')\n height_d = dardar.get_data('height')\n\n \n# convert longitude from -180-180 to 0-360\n if lon_d.min() < 0:\n lon_d = lon_d % 360\n \n# add extra pressure level in ERA5 data\n xlevel = 1200\n ERA.add_extra_level('temperature', xlevel)\n ERA.add_extra_level('geopotential', xlevel)\n \n# get ERA lat/lon/pressure grids\n \n lat = ERA.t.latitude.data\n lon = ERA.t.longitude.data\n level = ERA.t.level.data \n t = ERA.t.t[0].data\n z = ERA.z.z[0].data\n \n level = np.log(level) # convert pressure to log\n \n \n# add two extra dimension to longitudes to wrap around during interpolation\n \n lon, z = expand_lon(ERA.z.longitude.data, z )\n lon, t = expand_lon(ERA.t.longitude.data, t )\n \n #my_interpolating_function = RegularGridInterpolator((level, lat, lon), A)\n \n p_grid = np.arange(1, 1150, 10)\n points = []\n \n# interpolate ERA5 to DARDAR lat/lon locations\n \n for i in range(len(p_grid)):\n p = np.log(p_grid[i]) # convert pressure to log range\n pts = [[p, lat_d[j], lon_d[j]] for j in range(len(lat_d))] \n points.append(pts)\n \n my_interpolating_function = interpolate(level, lat, lon, t) \n grid_t = my_interpolating_function(points)\n \n my_interpolating_function = interpolate(level, lat, lon, z) \n grid_z = my_interpolating_function(points)\n \n return grid_t, grid_z", "def tde(ax, col, legend):\n z = 0.354\n d = Planck15.luminosity_distance(z=z).cgs.value\n\n # In the Eftekhari paper, it says that although the event was first\n # triggered by Swift/BAT on 2011 March 28.55 UT, subsequent\n # analysis of the BAT data revealed discernible emission as early as\n # 2011 March 25. 
All times should therefore be shifted relative to Mar 25.5\n\n # Need to add 3.04 to the Zauderer points\n nu, dt, f, ef, islim = zauderer()\n t = (dt+3.04)/(1+z)\n\n # Low frequency\n nu_plt = 4.9E9\n choose = np.logical_and(~islim, nu == nu_plt/1E9)\n dt_all = t[choose]\n nufnu_all = nu_plt*f[choose]\n\n # adding the set from Berger2012\n # and making the same correction as above\n # this is 4.9 GHz\n t = (np.array([3.87, 4.76, 5.00, 5.79, 6.78, 7.77, 9.79, 14.98, 22.78,\n 35.86, 50.65, 67.61, 94.64, 111.62, 126.51, 143.62, 164.38, 174.47,\n 197.41, 213.32])) / (1+z)\n f = np.array([0.25, 0.34, 0.34, 0.61, 0.82, 1.48, 1.47, 1.80, 2.10, 4.62,\n 4.84, 5.86, 9.06, 9.10, 9.10, 11.71, 12.93, 12.83, 13.29, 12.43])\n\n # Berger 2012: use the 8.4 GHz light curve, since that's closest in freq\n #t = (np.array([14.97, 127.69, 159.77, 174.47, 177.50, 197.41, 213.32, 219.22]))/(1+z)\n #f = np.array([5.49, 19.03, 22.15, 23.19, 23.65, 22.42, 22.04, 21.52])\n #dt_all = np.append(dt_all, t)\n #nufnu_all = np.append(nufnu_all, f*nu_plt)\n\n # adding the set from Zauderer2013\n # they also say it's relative to March 25.5...\n # so I think I need to subtract 3.04 days from here too\n t = (np.array([245.23, 302.95, 383.92, 453.66, 582.31]))/(1+z)\n f = np.array([12.17, 12.05, 12.24, 11.12, 8.90])\n dt_all = np.append(dt_all, t)\n nufnu_all = np.append(nufnu_all, f*nu_plt)\n\n # adding the set from Eftekhari 2018\n t = np.array([645, 651.1, 787.6, 1032, 1105, 1373, 1894])\n f = np.array([8.24, 8.63, 6.23, 4.21, 3.52, 2.34, 1.47])\n dt_all = np.append(dt_all, t)\n nufnu_all = np.append(nufnu_all, f*nu_plt)\n\n order = np.argsort(dt_all)\n lum = plot_line(\n ax, d, dt_all[order], nufnu_all[order], \n 'SwiftJ1644+57', 'TDE', col, legend)\n ax.text(dt_all[order][10], lum[10]*1.1, 'Swift J1644+57', fontsize=11,\n verticalalignment='bottom',\n horizontalalignment='left')", "def n1derivative_clee(cl_array,bins,n1bins,clpp,norms,cls,cltt,clee,clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out):\n \n bins=bins-2\n array1001=perturbe_clist(cl_array,bins,1.001)\n array999=perturbe_clist(cl_array,bins,0.999)\n \n N1001=[[],[],[],[],[]] #list of lists containing tt,ee,eb,te,tb\n N0999=[[],[],[],[],[]]\n \n for i in range(len(array1001)):\n print(i)\n \n a=compute_n1_py(clpp,norms,cls,cltt,array1001[i],clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out)\n b=compute_n1_py(clpp,norms,cls,cltt,array999[i],clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out)\n for j in range(len(N1001)):\n N1001[j].append(a[j])\n N0999[j].append(b[j])\n\n delta=diff_cl(cl_array,bins)\n\n \n keys=['TT','EE','EB','TE','TB']\n \n derlist=[]\n for k in range(len(keys)):\n diff=[n1bins]\n for i in range(len(N1001[1])):\n der=((N1001[k][i][:len(n1bins)]-N0999[k][i][:len(n1bins)])*(n1bins*(n1bins+1))**2*0.25)/delta[i]\n diff.append(der)\n der=np.insert(np.transpose(diff),0,np.insert(bins,0,0),axis=0)\n derlist.append(der)\n np.savetxt('../data/n1{}dclee.txt'.format(keys[k]),der)\n return derlist", "def __init__(self, label_num, des_dir, des_dim=48):\n self.label_num = label_num\n self.des_dir = des_dir\n self.des_dim = 48", "def n0derivative_clee(cl_array,bins,n0bins,clpp,norms,cls,cltt,clee,clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out):\n \n bins=bins-2\n array1001=perturbe_clist(cl_array,bins,1.001)\n array999=perturbe_clist(cl_array,bins,0.999)\n \n N1001=[[],[],[],[],[]] #list of lists containing tt,ee,eb,te,tb\n N0999=[[],[],[],[],[]]\n \n for i in range(len(array1001)):\n 
print(i)\n a=compute_n0_py(clpp,cls,cltt,array1001[i],clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lmin_out,Lstep)\n b=compute_n0_py(clpp,cls,cltt,array999[i],clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lmin_out,Lstep)\n for j in range(len(N1001)):\n N1001[j].append(a[j])\n N0999[j].append(b[j])\n\n delta=diff_cl(cl_array,bins)\n\n \n keys=['TT','EE','EB','TE','TB']\n \n derlist=[]\n for k in range(len(keys)):\n diff=[n0bins]\n for i in range(len(N1001[1])):\n der=((N1001[k][i][:len(n0bins)]-N0999[k][i][:len(n0bins)])*(n0bins*(n0bins+1))**2*0.25)/delta[i]\n diff.append(der)\n der=np.insert(np.transpose(diff),0,np.insert(bins,0,0),axis=0)\n derlist.append(der)\n np.savetxt('../data/n0{}dclee.txt'.format(keys[k]),der)\n print(derlist)\n return derlist", "def from_cdo_griddes(griddes):\n\n with open(griddes) as grid_file:\n grid_file_lines = grid_file.readlines()\n\n grid_dic = {}\n\n for line in grid_file_lines:\n words = line.split()\n if words[0] == '#':\n continue\n else:\n length = len(words)\n if length == 3:\n grid_dic[words[0]] = words[2]\n else:\n value_string = ' '.join(words[2:length-1])\n grid_dic[words[0]] = value_string\n\n if grid_dic['gridtype'] != 'lonlat':\n print(('Gridtype {0} not supported'.format(grid_dic['gridtype'])))\n return ''\n\n lon = np.zeros(int(grid_dic['xsize']))\n lat = np.zeros(int(grid_dic['ysize']))\n\n for i in range(len(lon)):\n lon[i] = float(grid_dic['xfirst']) + i * float(grid_dic['xinc'])\n for j in range(len(lat)):\n lat[j] = float(grid_dic['yfirst']) + j * float(grid_dic['yinc'])\n\n if grid_dic['xname'] == 'rlon':\n pol_lon = float(grid_dic['xnpole'])\n pol_lat = float(grid_dic['ynpole'])\n grid = RotGrid(lon, lat, pol_lon, pol_lat)\n else:\n grid = Grid(lon, lat)\n\n return grid", "def calculate_with_julianTD(self, jde):\n toRad = math.pi / 180.0\n self.T = (jde - astrodate.J2000) / 365250.0\n self.__calculate_L(self.terms[0])\n self.__calculate_B(self.terms[1])\n self.__calculate_R(self.terms[2])\n t = self.T * 10.0\n Lp = (self.L + (((-0.00031 * t) - 1.397) * t)) * toRad\n self.dL = (-0.09033 + (0.03916 * (math.cos(Lp) + math.sin(Lp)) * math.tan(self.B * toRad))) / 3600.0\n self.dB = (0.03916 * (math.cos(Lp) - math.sin(Lp))) / 3600.0", "def make_decreasing_ohlc(open, high, low, close, dates, **kwargs):\n (flat_decrease_x, flat_decrease_y, text_decrease) = _OHLC(\n open, high, low, close, dates\n ).get_decrease()\n\n kwargs.setdefault(\"line\", dict(color=_DEFAULT_DECREASING_COLOR, width=1))\n kwargs.setdefault(\"text\", text_decrease)\n kwargs.setdefault(\"showlegend\", False)\n kwargs.setdefault(\"name\", \"Decreasing\")\n\n ohlc_decr = dict(\n type=\"scatter\", x=flat_decrease_x, y=flat_decrease_y, mode=\"lines\", **kwargs\n )\n return ohlc_decr", "def scn2datacube(self, unq_id):\n raise EODataDownException(\"Not implemented.\")", "def dx(self):\n values = self._interpolate_table(\"dx\")\n return values", "def generate_decomposition_LEGACY(labels, expected_eigenvalues, alphabeta):\n # generate Lanczos decomposition\n raw_decomposition = generate_raw_decomposition(alphabeta)\n\n # bin Lanczos decomposition\n bins = histogram.BinMapping.create_bisection_bins(expected_eigenvalues)\n binned_decomposition = histogram.BinMapping(keys=labels, bins=bins)\n for eigval, probability in raw_decomposition:\n binned_decomposition[eigval] += probability\n\n return (raw_decomposition,binned_decomposition)", "def gon2deca(gon):\n return DECAngle(gon2dec(gon))", "def make_derivate_fn(self):\n\n def derivative_fn(y, t):\n self.time = t\n 
self.compartments = self.convert_list_to_compartments(y)\n self.prepare_vars_and_flows()\n flow_vector = self.convert_compartments_to_list(self.flows)\n self.checks()\n return flow_vector\n\n return derivative_fn", "def download_glider(dataset_id, bounding_box):\n # set up the components of the ERDDAP request\n GLIDER_DAC.constraints = {\n 'latitude>=': bounding_box[0],\n 'latitude<=': bounding_box[1],\n 'longitude>=': bounding_box[2],\n 'longitude<=': bounding_box[3],\n }\n GLIDER_DAC.protocol = 'tabledap'\n GLIDER_DAC.variables = ['precise_time', 'precise_lon', 'precise_lat', 'depth', 'pressure', 'temperature',\n 'conductivity', 'salinity', 'density', 'backscatter', 'CDOM', 'chlorophyll',\n 'dissolved_oxygen', 'PAR']\n GLIDER_DAC.dataset_id = dataset_id\n\n # load the data into an xarray dataset\n try:\n ds = GLIDER_DAC.to_xarray()\n except requests.exceptions.HTTPError:\n # message = f\"No data found within the bounding box for dataset ID {dataset_id}.\"\n # warnings.warn(message)\n return None\n\n ds = ds.swap_dims({'obs': 'precise_time'})\n ds = ds.reset_coords()\n keys = ['profile_id', 'time', 'longitude', 'latitude', 'trajectoryIndex', 'rowSize']\n for key in keys:\n if key in ds.variables:\n ds = ds.drop_vars(key)\n\n # rename some parameters to align with the other data sets (PARAD and FLORT)\n ds = ds.squeeze(drop=True)\n ds = ds.rename({\n 'precise_time': 'time',\n 'precise_lon': 'longitude',\n 'precise_lat': 'latitude',\n 'PAR': 'par',\n 'backscatter': 'bback',\n 'chlorophyll': 'estimated_chlorophyll',\n 'CDOM': 'fluorometric_cdom'\n })\n\n # sort the data by time\n ds = ds.sortby('time')\n\n return ds", "def __init__(self,dos):\n self.e = self.double(dos.e,-1.0)\n self.g = self.double(dos.g)\n self.gz = self.double(dos.gz)\n self.cutoffInd = dos.cutoffInd\n self.cutoff = dos.cutoff\n self.de = dos.de", "def drfl_dsurfaceb(self, x_surface, geom):\n\n return np.zeros((self.n_wl, 1))", "def n1derivative_clte(cl_array,bins,n1bins,clpp,norms,cls,cltt,clee,clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out):\n bins=bins-2\n array1001=perturbe_clist(cl_array,bins,1.001)\n array999=perturbe_clist(cl_array,bins,0.999)\n \n N1001=[[],[],[],[],[]] #list of lists containing tt,ee,eb,te,tb\n N0999=[[],[],[],[],[]]\n \n for i in range(len(array1001)):\n print(i)\n a=compute_n1_py(clpp,norms,cls,cltt,clee,clbb,array1001[i],NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out)\n b=compute_n1_py(clpp,norms,cls,cltt,clee,clbb,array999[i],NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out)\n for j in range(len(N1001)):\n N1001[j].append(a[j])\n N0999[j].append(b[j])\n\n delta=diff_cl(cl_array,bins)\n \n keys=['TT','EE','EB','TE','TB']\n\n derlist=[]\n for k in range(len(keys)):\n diff=[n1bins]\n for i in range(len(N1001[1])):\n der=((N1001[k][i][:len(n1bins)]-N0999[k][i][:len(n1bins)])*(n1bins*(n1bins+1))**2*0.25)/delta[i]\n diff.append(der)\n der=np.insert(np.transpose(diff),0,np.insert(bins,0,0),axis=0)\n derlist.append(der)\n np.savetxt('../data/n1{}dclte.txt'.format(keys[k]),der)\n return derlist", "def Calc_dopage_labs(l_onde) :\n eps0=8.854e-12\n mel=9.109e-31\n ev=1.60218e-19\n c= 3e8\n return (1e12*4*(np.pi*c)**2*eps0*(10.9)*(0.067*mel/(l_onde*ev)**2))*1e-6", "def __init__(self, vertices=None, edges=None, faces=None):\n super(DCEL, self).__init__()\n self.vertices = vertices or []\n self.edges = edges or []\n self.faces = faces or []", "def compile_ODEs(self):\n global dydt, ct, pool\n \n if hasattr(self, \"pool\"):\n self.pool.close()\n \n 
        self.compute_dependencies()
        self.compute_Jacobian()
        self.derivative_components = {}
        for name in self.names_species:
            self.derivative_components[name] = compile(self.ODEs[name], "dydt_" + name, "eval")

#        n_processes = 8
#        barycenters = self.get_barycenters()
#        inner_cids = []
#        cids = []
#        for i in xrange(n_processes):
#            cids.append(set())
#            inner_cids.append(set())
#
#        for cid, coords in barycenters.items():
#            bin_ = np.sum((coords > np.median(barycenters.values(), axis=0)) * (2 ** np.arange(3)))
#            inner_cids[bin_].add(cid)
#            cids[bin_].add(cid)
#            cids[bin_].update(self.mesh.border_neighbors(3, cid))

        n_sub_bins = 3 * self.n_bins - 2
        cids = [self.cids[np.logical_and(i - 2 <= self.bins, self.bins <= i + 2)] for i in xrange(0, n_sub_bins, 3)]
        inner_cids = [self.cids[np.logical_and(i - 1 <= self.bins, self.bins <= i + 1)] for i in xrange(0, n_sub_bins, 3)]
#        print self.bins
#        print cids
#        print inner_cids

        dydt = multicell.parallel.ConcentrationTableMultiprocessing(self.names_species, self.cids)
        ct = multicell.parallel.ConcentrationTableMultiprocessing(self.names_species, self.cids)

        pool = multiprocess.Pool(initializer=init, initargs=(dydt.rawarray, ct.rawarray, self))
        self.pool = pool

        def derivative(y, t):
            global dydt, ct, pool
            # Initialization of the derivative vector
            dydt.fill(0)
            ct.import_values(y)
            ct *= (ct>0)

            # multiprocessing
            pool.map(work, [(t, cids[i], inner_cids[i]) for i in xrange(self.n_bins)])
#            print dydt
#            pool.join()

            result = dydt.as_1d_array()

            # Test
            #print len(result), len(y)
            assert len(result) == len(y), "y and dydt are different lengths"

            for name in self.names_species:
                assert not np.any(np.isnan(self.y.current().get_species(name))), "NaN value in concentrations of %s" % name
                assert not np.any(np.isinf(self.y.current().get_species(name))), "Inf value in concentrations of %s" % name

            return result

        self.derivative = derivative

def generate_input_from_MDELCC_grid(self, outdir, lat_dd, lon_dd,
                                    year_range):
    if not osp.exists(outdir):
        os.makedirs(outdir)

    lat_idx, lon_idx = self.get_idx_from_latlon(
        lat_dd, lon_dd, unique=True)
    lat_dd = [self.lat[i] for i in lat_idx]
    lon_dd = [self.lon[i] for i in lon_idx]

    # Fetch the daily weather data from the netCDF files.
    years = range(year_range[0], year_range[1] + 1)
    tasavg, precip, years = self.get_data_from_idx(lat_idx, lon_idx, years)

    # Create an array of datestring and lat/lon
    Ndt, Ndset = np.shape(tasavg)
    start = datetime.datetime(years[0], 1, 1)
    datetimes = [start + datetime.timedelta(days=i) for i in range(Ndt)]
    datestrings = [dt.strftime("%d/%m/%Y") for dt in datetimes]

    # Fill -999 with 0 in daily precip.
    precip[:, :][precip[:, :] == -999] = 0

    # Fill -999 with linear interpolation in daily air temp.
    time_ = np.arange(Ndt)
    for i in range(Ndset):
        indx = np.where(tasavg[:, i] != -999)[0]
        tasavg[:, i] = np.interp(time_, time_[indx], tasavg[:, i][indx])

    # Convert and save the weather data to PyHelp csv input files.
    for var in ['precip', 'airtemp']:
        if var == 'precip':
            varname = 'Precipitation in mm'
            data = nan_as_text_tolist(precip)
        elif var == 'airtemp':
            varname = 'Average daily air temperature in \u00B0C'
            data = nan_as_text_tolist(tasavg)
        fname = osp.join(outdir, var + '_input_data.csv')

        print('Saving {} data to {}...'.format(var, fname), end=' ')
        fheader = [
            [varname],
            ['', ''],
            ['Created by ' + __namever__],
            ['Created on ' + strftime("%d/%m/%Y")],
            ['Created from MDDELCC grid'],
            ['', ''],
            ['Latitude (dd)'] + lat_dd,
            ['Longitude (dd)'] + lon_dd,
            ['', '']]
        fdata = [[datestrings[i]] + data[i] for i in range(Ndt)]
        fcontent = fheader + fdata
        save_content_to_csv(fname, fcontent)
        print('done')

def calcBeamplot(PALC_config, gamma_n, plt_ranges, f, dire_meas_LSA, dire_meas_deg):
    # general
    N = PALC_config.N

    ########################## SIMULATION SETUP ###############################
    # maximum of discrete mapping points:
    max_points = 10000
    # mesh
    x_range = plt_ranges.p_x
    y_range = plt_ranges.p_y
    pts_x = max_points / (y_range[1]-y_range[0])
    pts_y = max_points / (x_range[1]-x_range[0])

    x = np.linspace(x_range[0], x_range[1], num=int(pts_x+1))
    y = np.linspace(y_range[0], y_range[1], num=int(pts_y+1))

    X, Y = np.meshgrid(x,y)
    # get vertically array
    x_vert = np.reshape(X, np.size(X))
    y_vert = np.reshape(Y, np.size(Y))
    z_vert = np.array([0])

    # reference pressure
    p0 = 2 * 10**(-5)
    # considered frequency
    omega = 2 * np.pi * f
    # initialize driving fct. and output array
    D_opt_LSA = np.ones([N, 1])
    P_LSA = np.zeros([np.shape(x_vert)[0],1], dtype=complex)

    # air attenuation
    T = 293.15
    p = 101.325 * 10**(3)
    h = 50
    alpha, c = AirAbsorptionCoefficient(f, T, p, h)

    # directivity
    if PALC_config.directivity not in ['Measured Loudspeaker Data']:
        dire_meas_LSA = np.ones([1,1])
        dire_meas_deg = np.ones([1,1])

    ######################### SPL CALCULATION #################################
    x_start, y_start, x_stop, y_stop, x_c_n, y_c_n, x_S, y_S = source_pos(gamma_n, PALC_config)

    G_LSA_vert = CalcGreenFunctions(x_vert, y_vert, z_vert, x_c_n, y_c_n, 0.82,
                                    PALC_config.directivity, PALC_config.Lambda_y,
                                    gamma_n, c, omega, 1, dire_meas_LSA,
                                    dire_meas_deg, alpha, f, 0)

    P_LSA[:,0] = G_LSA_vert @ D_opt_LSA[:,0]

    p_SPL = 20 * np.log10(np.abs(P_LSA) / p0)

    p_SPL = np.reshape(p_SPL, np.shape(X))

    return p_SPL, X, Y

def derivative(self, x):
    z = np.asarray(x)
    return (self._der(z.flatten())).reshape(z.shape)

def plot_ebtel_dem(data_directory,data_file,**kwargs):

    #Load DEM data
    data = np.loadtxt(data_directory+data_file)
    #Slice array to get necessary vectors
    temp = data[:,0]
    dem_tr = data[:,1]
    dem_cor = data[:,2]
    dem_tot = data[:,3]
    em_cor = data[:,4]

    #Set up the figure
    fig = plt.figure(figsize=(10,10))
    ax = fig.gca()
    fs = 16
    ax.plot(temp,dem_tr,label=r'TR')
    ax.plot(temp,dem_cor,'r',label=r'Corona')
    ax.plot(temp,dem_tot,'g',label=r'Total')
    ax.plot(temp,em_cor,'--r',label=r'Corona EM')
    ax.legend()
    ax.set_title(r'EBTEL-C DEM',fontsize=fs)
    ax.set_xlabel(r'$\log(T_{DEM})$ (K)',fontsize=fs)
    ax.set_ylabel(r'$\log($DEM$)$ (cm$^{-5}$ K$^{-1}$)',fontsize=fs)
    ax.set_xlim([5.5,7.5])
    ax.set_ylim([18.0,30.0])

    #Check if output filename is specified
    if 'print_fig_filename' in kwargs:
        plt.savefig(kwargs['print_fig_filename'],format='eps',dpi=1000)
    else:
        plt.show()

def domain_decomposition(self, domain_decomposition):

    self._domain_decomposition = domain_decomposition

def create_detr(num_classes: int, num_queries: int, backbone: str):

    model = DETR(num_classes, num_queries, backbone)
    return model

def interpret(self):
    return binfracstr2decfrac(self.bin_value)

def get_dfdebt(self, energy_debt):
    return pd.DataFrame([{'from': k[0], 'to': k[1], 'amount': v} for k, v in energy_debt.items()])

def n1derivative_clbb(cl_array,bins,n1bins,clpp,norms,cls,cltt,clee,clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out):
    bins=bins-2
    array1001=perturbe_clist(cl_array,bins,1.001)
    array999=perturbe_clist(cl_array,bins,0.999)

    N1001=[[],[],[],[],[]]  #list of lists containing tt,ee,eb,te,tb
    N0999=[[],[],[],[],[]]

    for i in range(len(array1001)):
        print(i)
        a=compute_n1_py(clpp,norms,cls,cltt,clee,array1001[i],clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out)
        b=compute_n1_py(clpp,norms,cls,cltt,clee,array999[i],clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out)
        for j in range(len(N1001)):
            N1001[j].append(a[j])
            N0999[j].append(b[j])

    delta=diff_cl(cl_array,bins)

    keys=['TT','EE','EB','TE','TB']

    derlist=[]
    for k in range(len(keys)):
        diff=[n1bins]
        for i in range(len(N1001[1])):
            der=((N1001[k][i][:len(n1bins)]-N0999[k][i][:len(n1bins)])*(n1bins*(n1bins+1))**2*0.25)/delta[i]
            diff.append(der)
        der=np.insert(np.transpose(diff),0,np.insert(bins,0,0),axis=0)
        derlist.append(der)
        np.savetxt('../data/n1{}dclbb.txt'.format(keys[k]),der)
    return derlist

def _convert(self):
    logger.info("Converting conformers to density")
    logger.debug("Masking")
    self._transformer.reset(full=True)
    for n, coor in enumerate(self._coor_set):
        self.conformer.coor = coor
        self._transformer.mask(self._rmask)
    mask = self._transformer.xmap.array > 0
    self._transformer.reset(full=True)

    nvalues = mask.sum()
    self._target = self.xmap.array[mask]
    logger.debug("Density")
    nmodels = len(self._coor_set)
    self._models = np.zeros((nmodels, nvalues), float)
    for n, coor in enumerate(self._coor_set):
        self.conformer.coor = coor
        self.conformer.b = self._bs[n]
        self._transformer.density()
        model = self._models[n]
        model[:] = self._transformer.xmap.array[mask]
        np.maximum(model, self.options.bulk_solvent_level, out=model)
        self._transformer.reset(full=True)

def plot_DA(filename):

    # Set up an array of redshift values.
    dz = 0.1
    z = numpy.arange(0., 10. + dz, dz)

    # Set up a cosmology dictionary, with an array of matter density values.
    cosmo = {}
    dom = 0.01
    om = numpy.atleast_2d(numpy.linspace(0.1, 1.0, (1.-0.1)/dom)).transpose()
    cosmo['omega_M_0'] = om
    cosmo['omega_lambda_0'] = 1. 
- cosmo['omega_M_0']\n cosmo['h'] = 0.701\n cosmo['omega_k_0'] = 0.0\n\n # Calculate the hubble distance.\n dh = cd.hubble_distance_z(0, **cosmo)\n # Calculate the angular diameter distance.\n da = cd.angular_diameter_distance(z, **cosmo)\n\n # Make plots.\n plot_dist(z, dz, om, dom, da, dh, 'angular diameter distance', r'D_A',\n filename)\n plot_dist_ony(z, dz, om, dom, da, dh, 'angular diameter distance', r'D_A',\n filename)", "def drfl_dsurface(self, x_surface, geom):\n\n return np.zeros((self.n_wl, 1))", "def PolyDiff(u, x, deg = 3, diff = 1, width = 5):\n\n u = u.flatten()\n x = x.flatten()\n\n n = len(x)\n du = np.zeros((n - 2*width,diff))\n\n # Take the derivatives in the center of the domain\n for j in range(width, n-width):\n\n points = np.arange(j - width, j + width)\n\n # Fit to a polynomial\n poly = np.polynomial.chebyshev.Chebyshev.fit(x[points],u[points],deg)\n\n # Take derivatives\n for d in range(1,diff+1):\n du[j-width, d-1] = poly.deriv(m=d)(x[j])\n\n return du", "def dy(self):\n values = self._interpolate_table(\"dy\")\n\n return values", "def _kde_example(data):\n # Plot the data\n ch = chartify.Chart(blank_labels=True, y_axis_type=\"density\")\n ch.set_title(\"KDE plot\")\n ch.plot.kde(data_frame=data, values_column=\"unit_price\", color_column=\"fruit\")\n ch.show(_OUTPUT_FORMAT)", "def __init__(self, x_len, num=(50, 0), den=(1, 50), dt=0.002, x_init=None):\n derivative_filter = signal.cont2discrete((num, den), dt)\n self.b = derivative_filter[0].ravel().astype(np.float32)\n self.a = derivative_filter[1].astype(np.float32)\n if x_init is None:\n self.z = np.zeros((max(len(self.a), len(self.b)) - 1, x_len),\n dtype=np.float32)\n else:\n self.set_initial_state(x_init)", "def plotData(BX,BY,xi,yi,expArr,t,savepath_dir):\r\n \r\n #Find the current channel data\r\n Jz=newCurrent(BX,BY,xi,yi,expArr,t)\r\n\r\n #Find the dipole vector components\r\n BxTime=np.real(BX*expArr[t])\r\n ByTime=np.real(BY*expArr[t])\r\n\r\n #Plot the current density contour and dipole vector grid\r\n #Create the figure\r\n p1=plt.figure(figsize=(9,8))\r\n \r\n #Plot the data\r\n p1=plt.contourf(xi,yi,Jz,levels=100,vmin=-0.1,vmax=0.1)\r\n qv1=plt.quiver(xi,yi,BxTime,ByTime,width=0.004,scale=3)\r\n \r\n #Add axes labels and title\r\n p1=plt.xlabel('X [cm]',fontsize=20)\r\n p1=plt.ylabel('Y [cm]',fontsize=20)\r\n # p1=plt.title('Alfven Wave Dipole; Frequency='+str(freq)+r'KHz; $\\nu_{ei}$='+str(col)+'KHz',fontsize=19,y=1.02)\r\n p1=plt.title('E Field; Frequency='+str(freq)+r'KHz; $\\nu_{ei}$='+str(col)+'KHz',fontsize=19,y=1.02)\r\n \r\n #Set axes parameters\r\n p1=plt.xticks(np.arange(-50,51,5))\r\n p1=plt.yticks(np.arange(-50,51,5))\r\n p1=plt.xlim(-xAxisLim,xAxisLim)\r\n p1=plt.ylim(-yAxisLim,yAxisLim)\r\n \r\n #Add colorbar\r\n cbar=plt.colorbar()\r\n cbar.set_label('Normalized Current Density',rotation=270,labelpad=15)\r\n cbar=plt.clim(-1,1)\r\n \r\n #Add vector label\r\n plt.quiverkey(qv1,-0.1,-0.1,0.2,label=r'$(B_x,B_y)$')\r\n \r\n #Miscellaneous\r\n p1=plt.tick_params(axis='both', which='major', labelsize=18)\r\n p1=plt.grid(True)\r\n p1=plt.gcf().subplots_adjust(left=0.15)\r\n\r\n #Save the plot\r\n savepath_frame=savepath_dir+'frame'+str(t+1)+'.png'\r\n p1=plt.savefig(savepath_frame,dpi=100,bbox_to_anchor='tight')\r\n p1=plt.close()\r\n\r\n #Let me know which frame we just saved\r\n print('Saved frame '+str(t+1)+' of '+str(len(expArr)))\r\n \r\n return", "def extract_depto(succId = 439) :\n deps_api = 
'https://www.lacomer.com.mx/lacomer-api/api/v1/public/header/inicio?cambioSucc=false&succFmt=100&succId={}'.format(succId)\n deps_json = json.loads(requests.get(deps_api).text)['departamentos']\n deps_list = list(deps_json.keys())\n deps_df = []\n\n for depto in deps_list :\n \n tmp = pd.DataFrame(deps_json[depto])\n tmp['dept'] = depto\n deps_df.append(tmp)\n \n deps_df = pd.concat(deps_df)\n deps_df['dept'].unique()\n\n sucursales = extract_sucursales()\n deps_df['succId'] = succId\n deps_df['sucursal'] = sucursales.loc[sucursales['id'] == succId,'sucursal'].iloc[0]\n\n return deps_df", "def get_base_depos(self, start, end, currencies, tenor, cut=\"NYC\", source=\"bloomberg\",\n cache_algo=\"internet_load_return\"):\n\n market_data_generator = self.market_data_generator\n\n if isinstance(currencies, str): currencies = [currencies]\n if isinstance(tenor, str): tenor = [tenor]\n\n tickers = []\n\n for cr in currencies:\n for tn in tenor:\n tickers.append(cr + tn)\n\n market_data_request = MarketDataRequest(\n start_date=start, finish_date=end,\n data_source=source,\n category='base-depos',\n freq='daily',\n cut=cut,\n tickers=tickers,\n fields=['close'],\n cache_algo=cache_algo,\n environment='backtest'\n )\n\n data_frame = market_data_generator.fetch_market_data(market_data_request)\n data_frame.index.name = 'Date'\n\n return data_frame", "def from_definition(cls, d):\n\n if \"values\" not in d:\n raise ValueError('ArrayCoordinates1d definition requires \"values\" property')\n\n coordinates = d[\"values\"]\n kwargs = {k: v for k, v in d.items() if k != \"values\"}\n return cls(coordinates, **kwargs)", "def build_meshfn(lp):\n # Place values assoc with keys of lattice_params as their defaults if not specified\n LatticeTop = lp['LatticeTop']\n shape = lp['shape']\n rootdir = lp['rootdir']\n NH = lp['NH']\n NV = lp['NV']\n if isinstance(NH, float):\n raise RuntimeError('Why is NH a float?')\n\n if 'cutLstr' in lp:\n cutLstr = lp['cutLstr']\n else:\n cutLstr = ''\n\n if 'delta_lattice' in lp:\n delta_lattice = lp['delta_lattice']\n elif 'delta' in lp:\n delta_lattice = '{0:0.3f}'.format(lp['delta'] / np.pi).replace('.', 'p')\n else:\n delta_lattice = ''\n lp['delta_lattice'] = delta_lattice\n\n print '\\n\\n\\n in le: delta_lattice = ', lp['delta_lattice'], '\\n\\n\\n'\n # sys.exit()\n\n if 'phi_lattice' in lp:\n phi_lattice = lp['phi_lattice'].replace('.', 'p').replace('-', 'n')\n else:\n if 'phi' in lp:\n phi_lattice = '{0:0.3f}'.format(lp['phi'] / np.pi).replace('.', 'p').replace('-', 'n')\n else:\n phi_lattice = '0p000'\n\n if 'theta_lattice' in lp:\n theta_lattice = lp['theta_lattice']\n else:\n theta_lattice = ''\n if 'eta' in lp:\n eta = lp['eta']\n else:\n eta = ''\n if 'huID' in lp:\n huID = lp['huID']\n elif 'conf' in lp:\n huID = '{0:02d}'.format(int(lp['conf']))\n else:\n huID = '01'\n if 'zkagome' in lp:\n zkagome = lp['zkagome']\n else:\n zkagome = -1\n if 'z' in lp:\n z = str(lp['z'])\n else:\n z = -1\n if 'origin' in lp:\n print 'lp[origin] = ', lp['origin']\n print \"(np.abs(lp['origin']) > 1e-7) = \", (np.abs(lp['origin']) > 1e-7)\n if (np.abs(lp['origin']) > 1e-7).any():\n originstr = '_originX' + '{0:0.2f}'.format(lp['origin'][0]).replace('.', 'p') + \\\n 'Y' + '{0:0.2f}'.format(lp['origin'][1]).replace('.', 'p')\n else:\n originstr = ''\n else:\n originstr = ''\n\n if 'periodic_strip' not in lp:\n lp['periodic_strip'] = False\n\n print '\\n\\n\\noriginstr = ', originstr\n print 'Searching for ' + LatticeTop + ' lattice...'\n\n # make sure rootdir ends with /\n rootdir = 
dio.prepdir(rootdir)\n ########################################################################################\n ########################################################################################\n print 'LatticeTop =', LatticeTop\n if LatticeTop == 'square':\n if lp['periodicBC']:\n if lp['periodic_strip']:\n periodicstr = '_periodicstrip'\n else:\n periodicstr = '_periodicBC'\n else:\n periodicstr = ''\n\n if eta == 0. or eta == '':\n etastr = ''\n else:\n etastr = '_eta{0:.3f}'.format(eta).replace('.', 'p')\n\n if theta_lattice == 0. or theta_lattice == '':\n thetastr = ''\n else:\n thetastr = '_theta{0:.3f}'.format(theta_lattice).replace('.', 'p') + 'pi'\n\n etatheta_str = etastr + thetastr\n print '\\n\\n', etatheta_str\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + periodicstr + \\\n etatheta_str + '_' + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop in ['hexagonal', 'hexmeanfield']:\n print '... forming hexagonal meshfn...'\n print 'le: again, lp[periodic_strip] = ', lp['periodic_strip']\n if lp['periodicBC']:\n if lp['periodic_strip']:\n periodicstr = '_periodicstrip'\n else:\n periodicstr = '_periodicBC'\n else:\n periodicstr = ''\n\n if eta == 0. or eta == '':\n etastr = ''\n else:\n etastr = '_eta{0:.3f}'.format(eta).replace('.', 'p')\n\n if theta_lattice == 0. or theta_lattice == '':\n thetastr = ''\n else:\n thetastr = '_theta{0:.3f}'.format(theta_lattice).replace('.', 'p') + 'pi'\n\n delta_phi_str = '_delta' + delta_lattice.replace('.', 'p') + '_phi' + phi_lattice + etastr + thetastr\n print '\\n\\n', delta_phi_str\n print('NH = ', NH)\n print('NV = ', NV)\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + periodicstr + \\\n delta_phi_str + '_' + '{0:06d}'.format(int(NH)) + '_x_' + '{0:06d}'.format(int(NV)) + \\\n cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop == 'hexannulus':\n # correct NV if it equals NH --> this would never be possible, and so if NV isn't specified (ie N=NH=NV is\n # specified), then use alph to determine the thickness of the annulus\n if eta == 0. or eta == '':\n etastr = ''\n else:\n etastr = '_eta{0:.3f}'.format(eta).replace('.', 'p')\n\n delta_phi_str = '_delta' + delta_lattice.replace('.', 'p') + '_phi' + phi_lattice + etastr\n alphstr = '_alph{0:0.2f}'.format(lp['alph']).replace('.', 'p')\n print '\\n\\n', delta_phi_str\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_circle' + \\\n delta_phi_str + alphstr + '_' + '{0:06d}'.format(NH) + '_x_' + '*' + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif 'selregion' in LatticeTop:\n # Assume there is only one instance of this selregion LatticeTop with a given NP size\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '*NP{0:06d}'.format(lp['NP_load']) + '_xy.txt'\n elif LatticeTop == 'triangular':\n if eta == 0. or eta == '':\n etastr = ''\n else:\n etastr = '_eta{0:.3f}'.format(eta)\n\n if theta_lattice == 0. 
or theta_lattice == '':\n thetastr = ''\n else:\n thetastr = '_theta{0:.3f}'.format(theta_lattice).replace('.', 'p') + 'pi'\n\n extrastr = etastr + thetastr\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + extrastr + '_' + \\\n '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop == 'jammed' or LatticeTop == 'isostatic':\n if lp['periodicBC']:\n if LatticeTop == 'jammed':\n periodicstr = '_periodicBC'\n else:\n periodicstr = '_periodic'\n else:\n periodicstr = ''\n if lp['source'] == 'ulrich':\n hustr = '_homog_z' + '{0:0.03f}'.format(lp['target_z']) + '_conf' + huID + '_zmethod' + lp['cutz_method']\n elif lp['source'] == 'hexner':\n if lp['NP_load'] > 0:\n hustr = periodicstr + '_hexner' + '_z*_conf' + huID + '_zmethod' + lp['cutz_method']\n else:\n print '---> here <----'\n if float(z) > 0:\n zstr = '{0:0.03f}'.format(float(z))\n else:\n zstr = '*'\n\n hustr = '_hexner' + periodicstr + '_z' + zstr + '_conf' + huID + '_zmethod' + lp['cutz_method']\n if lp['NP_load'] > 0:\n print '{0:06d}'.format(lp['NP_load'])\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + hustr + '_NP' + \\\n '{0:06d}'.format(lp['NP_load']) + '_xy.txt'\n else:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + hustr + '_' + \\\n '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop == 'deformed_kagome' or LatticeTop == 'deformed_martini':\n if lp['periodicBC']:\n if lp['periodic_strip']:\n periodicstr = '_periodicstrip'\n else:\n periodicstr = '_periodic'\n else:\n periodicstr = ''\n if np.abs(lp['theta']) > 1e-9:\n thetastr = '_theta{0:0.3f}'.format(np.round(lp['theta'] * 1000) * 0.001).replace('.', 'p')\n else:\n thetastr = ''\n\n paramstr = '_x1_' + '{0:0.4f}'.format(lp['x1']).replace('.', 'p').replace('-', 'n') + \\\n '_x2_' + '{0:0.4f}'.format(lp['x2']).replace('.', 'p').replace('-', 'n') + \\\n '_x3_' + '{0:0.4f}'.format(lp['x3']).replace('.', 'p').replace('-', 'n') + \\\n '_z_' + '{0:0.4f}'.format(lp['z']).replace('.', 'p').replace('-', 'n')\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + periodicstr +\\\n thetastr + paramstr + '_{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop == 'twisted_kagome':\n paramstr = '_alph_' + '{0:0.4f}'.format(lp['alph'])\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + paramstr + '_{0:06d}'.format(\n NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif 'hyperuniform' in LatticeTop:\n # hyperuniform ID string\n hustr = '_d' + huID + '_z{0:0.3f}'.format(lp['target_z']).replace('.', 'p').replace('-', 'n')\n if lp['periodicBC']:\n if lp['periodic_strip']:\n periodicstr = '_periodicstrip'\n else:\n periodicstr = '_periodic'\n else:\n periodicstr = ''\n\n if lp['NP_load'] > 0:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + periodicstr + hustr + \\\n originstr + '_NP' + '{0:06d}'.format(lp['NP_load']) + cutLstr + '_xy.txt'\n else:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + periodicstr + hustr + '_' + \\\n '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop in ['hucentroid', 'huvoronoi']:\n # hyperuniform ID string\n hustr = '_d' + huID\n if 
lp['periodicBC']:\n if lp['periodic_strip']:\n periodicstr = '_periodicstrip'\n stripnhnv = '_NH{0:06d}'.format(lp['NH']) + '_NV{0:06d}'.format(lp['NV'])\n else:\n periodicstr = '_periodic'\n stripnhnv = ''\n else:\n periodicstr = ''\n if lp['NP_load'] > 0:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + periodicstr + hustr + \\\n originstr + stripnhnv + '_NP' + '{0:06d}'.format(lp['NP_load']) + cutLstr + '_xy.txt'\n else:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + periodicstr + hustr + \\\n originstr + '_' + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop in ['kagome_hucent', 'kagome_huvor']:\n # hyperuniform ID string\n hustr = '_d' + huID\n if lp['NP_load'] > 0:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + '_periodic' + hustr \\\n + '_NP{0:06d}'.format(lp['NP_load']) + cutLstr + '_xy.txt'\n else:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + hustr + \\\n '_{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n # elif 'isostatic' in LatticeTop :\n # # hyperuniform ID string --> isostatic ID string\n # hustr = '_homog_zindex001'+'_conf'+huID\n # ffind = rootdir+'networks/'+LatticeTop+'/'+LatticeTop+'_'+shape+hustr+'_'+'{0:06d}'.format(NH)+'_x_'+\n # '{0:06d}'.format(NV)+cutLstr+'_xy.txt'\n # print 'searching for ', ffind\n elif LatticeTop in ['iscentroid', 'isvoronoi']:\n # isostatic ID string\n if lp['NP_load'] > 0:\n hustr = '_hexner_size' + str(lp['NP_load']) + '_conf' + huID\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + '_periodic' + hustr + \\\n '_NP' + '{0:06d}'.format(lp['NP_load']) + cutLstr + '_xy.txt'\n else:\n if lp['source'] == 'ulrich':\n hustr = '_homog_zindex001' + '_conf' + huID\n elif lp['source'] == 'hexner':\n if NH > 10 or NV > 10:\n hustr = '_hexner_size8192_conf' + huID\n else:\n hustr = '_hexner_size0512_conf' + huID\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + hustr + '_' + '{0:06d}'.format(\n NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop in ['kagome_isocent', 'kagome_isovor']:\n # isostatic ID string\n if lp['source'] == 'ulrich':\n hustr = '_ulrich_homog_zindex001' + '_conf' + huID\n elif lp['source'] == 'hexner':\n if lp['periodicBC'] and lp['NP_load'] > 0:\n hustr = '_hexner_size' + str(lp['NP_load']) + '_conf' + huID\n elif NH > 13 or NV > 13:\n hustr = '_hexner_size8192_conf' + huID\n else:\n hustr = '_hexner_size0512_conf' + huID\n if lp['periodicBC'] and lp['NP_load'] > 0:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + '_periodic' + hustr + \\\n '_NP' + '{0:06d}'.format(lp['NP_load']) + cutLstr + '_xy.txt'\n else:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + hustr + \\\n '_' + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop in ['iscentroid_annulus', 'kagome_iscent_annulus']:\n # hyperuniform ID string\n lp['shape'] = 'annulus'\n shape = lp['shape']\n hustr = '_d' + huID\n if lp['periodicBC'] or lp['periodic_strip']:\n raise RuntimeError('Network is labeled as periodic but is also an annulus.')\n\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + hustr + \\\n '_alph' + sf.float2pstr(lp['alph']) + \\\n originstr + 
'_' + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop in ['hucentroid_annulus', 'kagome_hucent_annulus']:\n # hyperuniform ID string\n lp['shape'] = 'annulus'\n shape = lp['shape']\n hustr = '_d' + huID\n if lp['periodicBC'] or lp['periodic_strip']:\n raise RuntimeError('Network is labeled as periodic but is also an annulus.')\n\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + hustr + \\\n '_alph' + sf.float2pstr(lp['alph']) + \\\n originstr + '_' + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop == 'linear':\n etastr = '{0:.3f}'.format(lp['eta']).replace('.', 'p')\n thetastr = '{0:.3f}'.format(lp['theta']).replace('.', 'p')\n if lp['periodicBC']:\n periodicstr = '_periodic'\n else:\n periodicstr = ''\n exten = periodicstr + '_line_theta' + thetastr + 'pi_eta' + etastr\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + exten + '_{0:06d}'.format(NH) + \\\n '_x_' + '{0:06d}'.format(1) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop == 'circlebonds':\n # circle of particles connected in a periodic line\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_{0:06d}'.format(NH) + \\\n '_x_' + '{0:06d}'.format(1) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop == 'dislocated':\n Ndefects = str(lp['Ndefects'])\n Bvec = lp['Bvec']\n dislocxy = lp['dislocxy'] # specifies the position of a single defect, if not centered, as tuple of strings\n if dislocxy == 'none':\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + '_Ndefects' + Ndefects + \\\n '_Bvec' + Bvec + '_{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n else:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + '_Ndefects' + Ndefects + \\\n '_Bvec' + Bvec + '_dislocxy_' + str(dislocxy[0]) + '_' + str(dislocxy[1]) + '_{0:06d}'.format(NH) + \\\n '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop == 'dislocatedTriangular':\n Ndefects = str(lp['Ndefects'])\n Bvec = lp['Bvec']\n try:\n dislocxy = lp['dislocxy'] # specifies the position of a single defect, if not centered, as tuple of strings\n except:\n dislocxy = 'none'\n\n if dislocxy == 'none':\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + '_Ndefects' + Ndefects + \\\n '_Bvec' + Bvec + '_{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n else:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + '_Ndefects' + Ndefects + \\\n '_Bvec' + Bvec + '_dislocxy_' + str(dislocxy[0]) + '_' + str(dislocxy[1]) + '_{0:06d}'.format(NH) + \\\n '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop == 'dislocatedRand':\n Ndefects = str(lp['Ndefects'])\n Bvec = lp['Bvec']\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + '_Ndefects' + Ndefects + \\\n '_Bvec' + Bvec + '_{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop == 'triangularz':\n zmethodstr = lp['cutz_method']\n zstr = str(lp['z'])\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + '_zmethod' + zmethodstr + \\\n '_z' + zstr + '_{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', 
ffind\n elif LatticeTop == 'penroserhombTri':\n if lp['periodicBC']:\n if lp['periodic_strip']:\n periodicstr = '_periodicstrip'\n else:\n periodicstr = '_periodic'\n else:\n perstr = ''\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + perstr + '_' + shape + \\\n '_div*_{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop == 'penroserhombTricent':\n if lp['periodicBC']:\n if lp['periodic_strip']:\n periodicstr = '_periodicstrip'\n else:\n periodicstr = '_periodic'\n else:\n perstr = ''\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + perstr + '_' + shape + \\\n '_div*_{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop == 'kagome_penroserhombTricent':\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + '_div*_{0:06d}'.format(\n NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif 'random_organization_gamma' in LatticeTop:\n hustr = '_d' + huID\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + hustr + '_' + \\\n '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop == 'kagper_hucent':\n print '\\n\\n sub-realization number (for given hu realization, which decoration?): lp[subconf] = ', lp[\n 'subconf'], '\\n'\n # hyperuniform ID string\n hustr = '_d' + huID\n perdstr = '_perd' + '{0:0.2f}'.format(lp['percolation_density']).replace('.', 'p')\n if lp['periodicBC']:\n if lp['periodic_strip']:\n periodicstr = '_periodicstrip'\n else:\n periodicstr = '_periodic'\n else:\n periodicstr = ''\n if lp['NP_load'] > 0:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + periodicstr + hustr + \\\n originstr + perdstr + \\\n '_r' + '{0:02d}'.format(int(lp['subconf'])) + \\\n '_NP' + '{0:06d}'.format(lp['NP_load']) + cutLstr + '_xy.txt'\n else:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + periodicstr + hustr + \\\n originstr + perdstr + \\\n '_r' + '{0:02d}'.format(int(lp['subconf'])) + \\\n '_' + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop in ['hucent_kagframe', 'kaghu_centframe', 'hucent_kagcframe']:\n # hyperuniform ID string\n hustr = '_d' + huID\n alphstr = '_alph' + '{0:0.2f}'.format(lp['alph']).replace('.', 'p')\n if lp['periodicBC']:\n if lp['periodic_strip']:\n periodicstr = '_periodicstrip'\n else:\n periodicstr = '_periodic'\n else:\n periodicstr = ''\n if lp['NP_load'] > 0:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + periodicstr + hustr + \\\n originstr + alphstr + '_NP' + '{0:06d}'.format(lp['NP_load']) + cutLstr + '_xy.txt'\n else:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + periodicstr + hustr + \\\n originstr + alphstr + '_' + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(\n NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop in ['isocent_kagframe', 'isocent_kagcframe']:\n # isostatic ID string\n if lp['source'] == 'ulrich':\n hustr = '_ulrich_homog_zindex001' + '_conf' + huID\n elif lp['source'] == 'hexner':\n if lp['periodicBC'] and lp['NP_load'] > 0:\n hustr = '_hexner_size' + str(lp['NP_load']) + '_conf' + huID\n elif NH > 80 or NV > 80:\n hustr = '_hexner_size128000_conf' + huID\n elif NH > 15 or NV > 15:\n 
hustr = '_hexner_size8192_conf' + huID\n else:\n hustr = '_hexner_size0512_conf' + huID\n perdstr = '_alph' + '{0:0.2f}'.format(lp['alph']).replace('.', 'p')\n if lp['periodicBC']:\n if lp['periodic_strip']:\n periodicstr = '_periodicstrip'\n else:\n periodicstr = '_periodic'\n else:\n periodicstr = ''\n if lp['NP_load'] > 0:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + periodicstr + hustr + \\\n originstr + perdstr + '_NP' + '{0:06d}'.format(lp['NP_load']) + cutLstr + '_xy.txt'\n else:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + periodicstr + hustr + \\\n originstr + perdstr + '_' + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(\n NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop == 'hex_kagframe' or LatticeTop == 'hex_kagcframe':\n alphstr = '_alph' + '{0:0.2f}'.format(lp['alph']).replace('.', 'p')\n if lp['periodicBC']:\n if lp['periodic_strip']:\n periodicstr = '_periodicstrip'\n else:\n periodicstr = '_periodic'\n else:\n periodicstr = ''\n\n if eta == 0. or eta == '':\n etastr = ''\n else:\n etastr = '_eta{0:.3f}'.format(eta).replace('.', 'p')\n if 'eta_alph' not in lp:\n print 'did not find eta_alph in lp, using alph value as eta_alph...'\n lp['eta_alph'] = lp['alph']\n\n etastr += '_etaalph' + sf.float2pstr(lp['eta_alph'], ndigits=3)\n\n if theta_lattice == 0. or theta_lattice == '':\n thetastr = ''\n else:\n thetastr = '_theta{0:.3f}'.format(theta_lattice).replace('.', 'p') + 'pi'\n\n delta_phi_str = '_delta' + delta_lattice.replace('.', 'p') + '_phi' + \\\n phi_lattice.replace('.', 'p') + thetastr\n\n if lp['NP_load'] > 0:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + periodicstr + delta_phi_str + \\\n originstr + alphstr + etastr + '_NP' + '{0:06d}'.format(lp['NP_load']) + cutLstr + '_xy.txt'\n else:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + periodicstr + delta_phi_str + \\\n originstr + alphstr + etastr + '_' + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + \\\n cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop == 'kagsplit_hex':\n alphstr = '_alph' + '{0:0.2f}'.format(lp['alph']).replace('.', 'p')\n if lp['periodicBC']:\n if lp['periodic_strip']:\n periodicstr = '_periodicstrip'\n else:\n periodicstr = '_periodicBC'\n else:\n periodicstr = ''\n\n if eta == 0. or eta == '':\n etastr = ''\n else:\n etastr = '_eta{0:.3f}'.format(eta)\n if theta_lattice == 0. or theta_lattice == '':\n thetastr = ''\n else:\n thetastr = '_theta{0:.3f}'.format(theta_lattice).replace('.', 'p') + 'pi'\n\n delta_phi_str = '_delta' + delta_lattice.replace('.', 'p') + '_phi' + phi_lattice + etastr + thetastr\n print '\\n\\n', delta_phi_str\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + periodicstr + delta_phi_str + \\\n alphstr + '_' + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop in ['kagper_hex', 'kagpergrid_hex']:\n perdstr = '_perd' + '{0:0.2f}'.format(lp['percolation_density']).replace('.', 'p')\n if LatticeTop == 'kagpergrid_hex':\n perdstr += '_alph' + '{0:0.2f}'.format(lp['alph']).replace('.', 'p')\n\n if lp['periodicBC']:\n if lp['periodic_strip']:\n periodicstr = '_periodicstrip'\n else:\n periodicstr = '_periodicBC'\n else:\n periodicstr = ''\n\n if eta == 0. or eta == '':\n etastr = ''\n else:\n etastr = '_eta{0:.3f}'.format(eta)\n\n if theta_lattice == 0. 
or theta_lattice == '':\n thetastr = ''\n else:\n thetastr = '_theta{0:.3f}'.format(theta_lattice).replace('.', 'p') + 'pi'\n\n delta_phi_str = '_delta' + delta_lattice.replace('.', 'p') + '_phi' + phi_lattice + etastr + thetastr\n # get string for configuration number\n if 'conf' in lp:\n confstr = '_conf{0:04d}'.format(lp['conf'])\n else:\n confstr = ''\n\n if lp['NP_load'] > 0:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + periodicstr + \\\n delta_phi_str + perdstr + confstr + \\\n '_NP' + '{0:06d}'.format(lp['NP_load']) + cutLstr + '_xy.txt'\n else:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + periodicstr + \\\n delta_phi_str + perdstr + confstr + \\\n '_' + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop in ['randomcent', 'kagome_randomcent']:\n if lp['periodicBC']:\n if lp['periodic_strip']:\n perstr = '_periodicstrip'\n else:\n perstr = '_periodic'\n else:\n perstr = ''\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + perstr + '_r' + \\\n '{0:02d}'.format(int(lp['conf'])) + \\\n '_' + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop in ['randomspreadcent', 'kagome_randomspreadcent']:\n if lp['periodicBC']:\n if lp['periodic_strip']:\n periodicstr = '_periodicstrip'\n else:\n periodicstr = '_periodic'\n else:\n perstr = ''\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + perstr + '_r' + \\\n '{0:02d}'.format(int(lp['conf'])) + \\\n '_spreadt{0:0.3f}'.format(lp['spreading_time']).replace('.', 'p') + \\\n '_' + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop in ['uofc_hucent', 'uofc_kaglow_hucent', 'uofc_kaghi_hucent',\n 'kaghi_hucent_curvys', 'kaglow_hucent_curvys']:\n hustr = '_d' + huID\n if 'thres' not in lp:\n lp['thres'] = 1.0\n\n if 'curvys' in LatticeTop:\n aratiostr = '_aratio{0:0.3f}'.format(lp['aratio']).replace('.', 'p')\n else:\n aratiostr = ''\n\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + hustr + \\\n '_thres' + sf.float2pstr(lp['thres'], ndigits=1) + aratiostr + \\\n '_' + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif LatticeTop in ['uofc_isocent', 'uofc_kaglow_isocent', 'uofc_kaghi_isocent', 'chicago_kaglow_isocent',\n 'chicago_kaghi_isocent', 'kaghi_isocent_chern', 'kaghi_hucent_chern',\n 'csmf_kaghi_isocent', 'kaghi_isocent_thanks',\n 'kaghi_isocent_curvys', 'kaglow_isocent_curvys']:\n if lp['source'] == 'ulrich':\n hustr = '_ulrich_homog_zindex001' + '_conf' + huID\n elif lp['source'] == 'hexner':\n if lp['periodicBC'] and lp['NP_load'] > 0:\n hustr = '_hexner_size' + str(lp['NP_load']) + '_conf' + huID\n elif NH > 80.5 or NV > 80.5:\n hustr = '_hexner_size128000_conf' + huID\n elif NH > 9 or NV > 9:\n hustr = '_hexner_size8192_conf' + huID\n else:\n hustr = '_hexner_size0512_conf' + huID\n\n if 'curvys' in LatticeTop:\n aratiostr = '_aratio{0:0.3f}'.format(lp['aratio']).replace('.', 'p')\n else:\n aratiostr = ''\n\n if lp['periodicBC']:\n if lp['periodic_strip']:\n periodicstr = '_periodicstrip'\n else:\n periodicstr = '_periodic'\n else:\n periodicstr = ''\n if 'thres' not in lp:\n lp['thres'] = 1.0\n\n if lp['NP_load'] > 0:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + 
periodicstr + hustr + \\\n originstr + \\\n '_thres' + sf.float2pstr(lp['thres'], ndigits=1) + aratiostr + \\\n '_NP' + '{0:06d}'.format(lp['NP_load']) + cutLstr + '_xy.txt'\n else:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + periodicstr + hustr + \\\n originstr + \\\n '_thres' + sf.float2pstr(lp['thres'], ndigits=1) + aratiostr + \\\n '_' + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif 'kaghi_randorg_gammakick' in LatticeTop and 'cent_curvys' in LatticeTop:\n # kaghi_randorg_gammakick1p60_cent_curvys\n # For cover optios in Nature Physics paper\n aratiostr = '_aratio{0:0.3f}'.format(lp['aratio']).replace('.', 'p')\n kickszstr = '_kicksz' + sf.float2pstr(lp['kicksz'], ndigits=3)\n spreadtstr = '_spreadt' + sf.float2pstr(lp['spreading_time'], ndigits=3)\n dtstr = '_dt' + sf.float2pstr(lp['spreading_dt'], ndigits=3)\n # for ensuring that no points are too close\n # alphstr =\n if lp['periodic_strip']:\n lp['NP_load'] = lp['NH'] * lp['NV']\n ffind = rootdir + 'networks/' + LatticeTop + '/' + \\\n lp['LatticeTop'] + '_' + lp['shape'] + '_periodicstrip' + kickszstr + spreadtstr + dtstr + \\\n '_d' + '{0:02d}'.format(int(lp['conf'])) + \\\n aratiostr + '_NP{0:06d}'.format(lp['NP_load']) + \\\n '_' + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + '_xy.txt'\n elif lp['periodicBC']:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + \\\n lp['LatticeTop'] + '_' + lp['shape'] + '_periodic' + kickszstr + spreadtstr + dtstr + \\\n '_d' + '{0:02d}'.format(int(lp['conf'])) + aratiostr + \\\n '_NP{0:06d}'.format(lp['NP_load']) + '_xy.txt'\n else:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + \\\n lp['LatticeTop'] + '_' + lp['shape'] + kickszstr + spreadtstr + dtstr + \\\n '_d' + '{0:02d}'.format(int(lp['conf'])) + \\\n aratiostr + '_NP{0:06d}'.format(lp['NP_load']) + \\\n '_' + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n\n print 'searching for ', ffind\n elif LatticeTop == 'kagome':\n if eta == 0. or eta == '':\n etastr = ''\n else:\n etastr = '_eta{0:.3f}'.format(eta).replace('.', 'p')\n\n if theta_lattice == 0. 
or theta_lattice == '':\n thetastr = ''\n else:\n thetastr = '_theta{0:.3f}'.format(theta_lattice).replace('.', 'p') + 'pi'\n\n delta_phi_str = '_delta' + delta_lattice.replace('.', 'p') + '_phi' + phi_lattice + etastr + thetastr\n\n if lp['periodicBC']:\n if lp['periodic_strip']:\n periodicstr = '_periodicstrip'\n else:\n periodicstr = '_periodic'\n else:\n periodicstr = ''\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + periodicstr + \\\n delta_phi_str + '_' + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif 'randorg_gammakick' in LatticeTop:\n kickszstr = '_kicksz' + sf.float2pstr(lp['kicksz'], ndigits=3)\n spreadtstr = '_spreadt' + sf.float2pstr(lp['spreading_time'], ndigits=3)\n if lp['periodic_strip']:\n lp['NP_load'] = lp['NH'] * lp['NV']\n ffind = rootdir + 'networks/' + LatticeTop + '/' + \\\n lp['LatticeTop'] + '_' + lp['shape'] + '_periodicstrip' + kickszstr + spreadtstr + \\\n '_d' + '{0:02d}'.format(int(lp['conf'])) + '_NP{0:06d}'.format(lp['NP_load']) + \\\n '_' + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + '_xy.txt'\n elif lp['periodicBC']:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + \\\n lp['LatticeTop'] + '_' + lp['shape'] + '_periodic' + kickszstr + spreadtstr + \\\n '_d' + '{0:02d}'.format(int(lp['conf'])) + '_NP{0:06d}'.format(lp['NP_load']) + '_xy.txt'\n else:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + \\\n lp['LatticeTop'] + '_' + lp['shape'] + kickszstr + spreadtstr + \\\n '_d' + '{0:02d}'.format(int(lp['conf'])) + '_NP{0:06d}'.format(lp['NP_load']) + \\\n '_' + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n\n print 'searching for ', ffind\n elif 'randorg_gamma' in LatticeTop:\n # NOTE THAT WE USE RANDORG_GAMMAKICK NOW\n raise RuntimeError('We use randorg_gammakick now instead of randorg_gamma.')\n spreadtstr = 'spreadt' + sf.float2pstr(lp['spreading_time'], ndigits=3)\n if lp['NP_load'] > 0:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + \\\n lp['LatticeTop'] + '_' + lp['shape'] + '_periodic_' + spreadtstr + \\\n '_d' + '{0:02d}'.format(int(lp['conf'])) + '_NP{0:06d}'.format(lp['NP_load']) + '_xy.txt'\n else:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + \\\n lp['LatticeTop'] + '_' + lp['shape'] + '_' + spreadtstr + \\\n '_d' + '{0:02d}'.format(int(lp['conf'])) + \\\n '_' + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif 'accordion' in LatticeTop:\n if 'hucent' in LatticeTop:\n # hyperuniform ID string\n hustr = '_d' + huID\n alphstr = '_alph' + sf.float2pstr(lp['alph']) + '_nzag{0:02d}'.format(lp['intparam'])\n if lp['NP_load'] > 0:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + '_periodic' + hustr \\\n + alphstr + '_NP{0:06d}'.format(lp['NP_load']) + cutLstr + '_xy.txt'\n else:\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + hustr \\\n + alphstr + '_{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n elif 'isocent' in LatticeTop:\n # accordionkag_isocent or accordionhex_isocent\n alphstr = 'alph' + sf.float2pstr(lp['alph']) + '_nzag{0:02d}'.format(lp['intparam']) + '_'\n # isostatic ID string\n if lp['NP_load'] > 0:\n hustr = '_hexner_size' + str(lp['NP_load']) + '_conf' + huID\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + '_periodic' + hustr + \\\n alphstr + '_NP' + '{0:06d}'.format(lp['NP_load']) + cutLstr + 
'_xy.txt'\n else:\n if lp['source'] == 'ulrich':\n hustr = '_homog_zindex001' + '_conf' + huID\n elif lp['source'] == 'hexner':\n if NH > 10 or NV > 10:\n hustr = '_hexner_size8192_conf' + huID\n else:\n hustr = '_hexner_size0512_conf' + huID\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + hustr + '_' + \\\n alphstr + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n elif LatticeTop in ['accordionhex', 'accordionkag']:\n if lp['periodicBC']:\n if lp['periodic_strip']:\n periodicstr = '_periodicstrip'\n else:\n periodicstr = '_periodicBC'\n else:\n periodicstr = ''\n\n if eta == 0. or eta == '':\n etastr = ''\n else:\n etastr = '_eta{0:.3f}'.format(eta).replace('.', 'p')\n\n if theta_lattice == 0. or theta_lattice == '':\n thetastr = ''\n else:\n thetastr = '_theta{0:.3f}'.format(theta_lattice).replace('.', 'p') + 'pi'\n\n alphstr = '_alph' + sf.float2pstr(lp['alph']) + '_nzag{0:02d}'.format(lp['intparam'])\n\n if 'eta_alph' in lp:\n if lp['eta_alph'] > 0:\n alphstr += '_etaalph' + sf.float2pstr(lp['eta_alph'], ndigits=2)\n\n delta_phi_str = '_delta' + delta_lattice.replace('.', 'p') + '_phi' + phi_lattice + etastr + thetastr\n print '\\n\\n', delta_phi_str\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + periodicstr + \\\n delta_phi_str + alphstr + \\\n '_' + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif 'spindle' in LatticeTop:\n if LatticeTop=='spindle':\n if lp['periodicBC']:\n if lp['periodic_strip']:\n periodicstr = '_periodicstrip'\n else:\n periodicstr = '_periodicBC'\n else:\n periodicstr = ''\n\n if np.abs(lp['aratio'] - 1.0) > 1e-9:\n aratiostr = '_aratio{0:0.3f}'.format(lp['aratio']).replace('.', 'p')\n else:\n aratiostr = ''\n\n if eta == 0. or eta == '':\n etastr = ''\n else:\n etastr = '_eta{0:.3f}'.format(eta).replace('.', 'p')\n\n if theta_lattice == 0. or theta_lattice == '':\n thetastr = ''\n else:\n thetastr = '_theta{0:.3f}'.format(theta_lattice).replace('.', 'p') + 'pi'\n\n alphstr = '_alph' + sf.float2pstr(lp['alph'], ndigits=4)\n\n if 'eta_alph' in lp:\n if lp['eta_alph'] > 0:\n alphstr += '_etaalph' + sf.float2pstr(lp['eta_alph'], ndigits=2)\n\n delta_phi_str = '_delta' + delta_lattice.replace('.', 'p') + '_phi' + phi_lattice + etastr + thetastr\n print '\\n\\n', delta_phi_str\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + periodicstr + \\\n delta_phi_str + alphstr + aratiostr + \\\n '_' + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n else:\n raise RuntimeError('only spindley lattice coded in le is spindle itself')\n print 'searching for ', ffind\n elif LatticeTop == 'stackedrhombic':\n if lp['periodicBC']:\n if lp['periodic_strip']:\n periodicstr = '_periodicstrip'\n else:\n periodicstr = '_periodicBC'\n else:\n periodicstr = ''\n\n if eta == 0. or eta == '':\n etastr = ''\n else:\n etastr = '_eta{0:.3f}'.format(eta).replace('.', 'p')\n\n if theta_lattice == 0. 
or theta_lattice == '':\n thetastr = ''\n else:\n thetastr = '_theta{0:.3f}'.format(theta_lattice).replace('.', 'p') + 'pi'\n\n stacknum = '_stack' + str(lp['intparam'])\n\n if 'phi_lattice' not in lp:\n lp['phi_lattice'] = sf.float2pstr(lp['phi'] / np.pi, ndigits=3)\n\n phi_str = '_phi' + lp['phi_lattice'].replace('.', 'p') + 'pi' + etastr + thetastr\n print '\\n\\n', phi_str\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + periodicstr + \\\n stacknum + phi_str + \\\n '_' + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n elif 'junction' in LatticeTop:\n # hexjunction or kagjunction\n # python ./build/make_lattice.py -LT junctiontriad\n if eta == 0. or eta == '':\n etastr = ''\n else:\n etastr = '_eta{0:.3f}'.format(eta).replace('.', 'p')\n if LatticeTop == 'hexjunctiontriad' or 'hexjunction2triads':\n alphstr = '_alph' + sf.float2pstr(lp['alph'], ndigits=6)\n if lp['periodicBC']:\n periodicstr = '_periodic'\n else:\n periodicstr = ''\n else:\n alphstr = '_alph' + sf.float2pstr(lp['alph']) + '_nzag{0:02d}'.format(lp['intparam'])\n periodicstr = ''\n\n delta_phi_str = '_delta' + delta_lattice.replace('.', 'p') + '_phi' + phi_lattice + etastr\n print '\\n\\n', delta_phi_str\n ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + periodicstr + delta_phi_str + \\\n alphstr + '_' + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n print 'searching for ', ffind\n\n # else:\n # theta_eta_str = '_theta' + '{0:.3f}'.format(theta_lattice / np.pi).replace('.','p') +\\\n # 'pi' + '_eta{0:.3f}'.format(eta).replace('.', 'p')\n # ffind = rootdir + 'networks/' + LatticeTop + '/' + LatticeTop + '_' + shape + theta_eta_str +\n # '_' + '{0:06d}'.format(NH) + '_x_' + '{0:06d}'.format(NV) + cutLstr + '_xy.txt'\n # print 'searching for ', ffind\n xyffind = ffind\n meshfn = ffind[0:-7]\n print 'le.build_meshfn(): returning meshfn = ', meshfn\n return meshfn, xyffind", "def ddm(self):\n return dec2ddm(self.dec_angle)", "def contato(self, def_data): \n self.xdi = def_data[0]\n self.ydi = np.zeros(len(self.xdi))\n self.zdi = def_data[2] \n\n self.yd = np.zeros(len(self.xdi))\n self.at_b()\n self.ydi = self.yd", "def __init__(self, date_debut, date_fin, lieu_depart, lieu_arrivee, distance, voiture, client, cout):\r\n\r\n self._date_debut = date_debut\r\n self._date_fin = date_fin\r\n self._lieu_depart = lieu_depart\r\n self._lieu_arrivee = lieu_arrivee\r\n self._distance = distance\r\n self._voiture = voiture\r\n self._client = client", "def epics_data_plot(data):\n if isinstance(data, (xr.DataArray, xr.Dataset)):\n data = data.to_dataframe()", "def cartesian2Geo(julian_date, x, y, z):\n\n\n # Calculate LLA\n lat, r_LST, ele = ecef2LatLonAlt(x, y, z)\n\n # Calculate proper longitude from the given JD\n lon, _ = LST2LongitudeEast(julian_date, np.degrees(r_LST))\n\n # Convert longitude to radians\n lon = np.radians(lon)\n\n\n return np.degrees(lat), np.degrees(lon), ele", "def plotfrom_dlos(self, dlosdata, **pltkwargs):\n\n # arbitrary catalog correction dictionary \n cat_corr = {\n 'catalog': {'name': 'nseries', 'n_mock': 1}, \n 'correction': {'name': 'upweight'} \n }\n\n dlosclass = Dlos(cat_corr)\n dlosclass.dlos = dlosdata\n \n if 'binsize' in pltkwargs.keys(): \n if isinstance(pltkwargs['binsize'], float): \n binsize = pltkwargs['binsize']\n else: \n if pltkwargs['binsize'] == 'fd_binsize': \n binsize = dlosclass.fd_binsize()\n\n pltkwargs.pop('binsize', None) # remove from dictionary\n 
else: \n binsize = 0.5 # (default)\n \n xmid, dlos_hist = dlosclass.dlos_dist( binsize = binsize ) \n\n if 'rescale' in pltkwargs.keys(): \n dlos_hist = dlos_hist * pltkwargs['rescale']\n pltkwargs.pop('rescale', None)\n \n self.sub.plot(xmid, dlos_hist, **pltkwargs) \n \n self.hist_max = max([ dlos_hist.max(), self.hist_max ]) \n \n return None", "def get_deer_load_shape(database_year):\n con = get_db_connection(database_year=database_year)\n return pd.read_sql_table(\"deer_load_shapes\", con=con).set_index(\"hour_of_year\")", "def processPCDD(pcdd, tag):\n\n global highPrice\n global lowPrice\n \n # Return value.\n rv = 0\n\n stepSizeTd = datetime.timedelta(days=3)\n #highPrice = 800.0\n #highPrice = 600.0\n #lowPrice = 600.0\n #lowPrice = 300.0\n\n if True:\n degreeValue = 0\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Moon\", \"geocentric\", \"tropical\",\n \"Sun\", \"geocentric\", \"tropical\",\n degreeValue, color=QColor(Qt.blue))\n \n if True:\n degreeValue = 180\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Moon\", \"geocentric\", \"tropical\",\n \"Sun\", \"geocentric\", \"tropical\",\n degreeValue, color=QColor(Qt.red))\n \n #success = PlanetaryCombinationsLibrary.addGeoLongitudeVelocityLines(\\\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # planetName=\"Mercury\", \n # color=None, stepSizeTd=stepSizeTd)\n #success = PlanetaryCombinationsLibrary.addGeoLongitudeVelocityLines(\\\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # planetName=\"Venus\", \n # color=None, stepSizeTd=stepSizeTd)\n #success = PlanetaryCombinationsLibrary.addGeoLongitudeVelocityLines(\\\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # planetName=\"Mars\", \n # color=None, stepSizeTd=stepSizeTd)\n #success = PlanetaryCombinationsLibrary.addGeoLongitudeVelocityLines(\\\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # planetName=\"Uranus\", \n # color=None, stepSizeTd=stepSizeTd)\n #success = PlanetaryCombinationsLibrary.addGeoLongitudeVelocityLines(\\\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # planetName=\"Saturn\", \n # color=None, stepSizeTd=stepSizeTd)\n #success = PlanetaryCombinationsLibrary.addGeoLongitudeVelocityLines(\\\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # planetName=\"MeanOfFive\", \n # color=None, stepSizeTd=stepSizeTd)\n #success = PlanetaryCombinationsLibrary.addGeoLongitudeVelocityLines(\\\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # planetName=\"CycleOfEight\", \n # color=None, stepSizeTd=stepSizeTd)\n\n #success = PlanetaryCombinationsLibrary.addGeoDeclinationLines(\\\n # pcdd, startDt, endDt, highPrice=700, lowPrice=660,\n # planetName=\"Moon\", \n # color=None, stepSizeTd=stepSizeTd)\n #success = PlanetaryCombinationsLibrary.addGeoDeclinationLines(\\\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # planetName=\"Mercury\", \n # color=None, stepSizeTd=stepSizeTd)\n #success = PlanetaryCombinationsLibrary.addGeoDeclinationLines(\\\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # planetName=\"Venus\", \n # color=None, stepSizeTd=stepSizeTd)\n #success = PlanetaryCombinationsLibrary.addGeoDeclinationLines(\\\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # planetName=\"Mars\", \n # color=None, stepSizeTd=stepSizeTd)\n #success = PlanetaryCombinationsLibrary.addGeoDeclinationLines(\\\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # planetName=\"Jupiter\", \n # color=None, 
stepSizeTd=stepSizeTd)\n #success = PlanetaryCombinationsLibrary.addGeoDeclinationLines(\\\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # planetName=\"Saturn\", \n # color=None, stepSizeTd=stepSizeTd)\n #success = PlanetaryCombinationsLibrary.addGeoDeclinationLines(\\\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # planetName=\"Uranus\", \n # color=None, stepSizeTd=stepSizeTd)\n #success = PlanetaryCombinationsLibrary.addGeoDeclinationLines(\\\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # planetName=\"Neptune\", \n # color=None, stepSizeTd=stepSizeTd)\n #success = PlanetaryCombinationsLibrary.addGeoDeclinationLines(\\\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # planetName=\"Pluto\", \n # color=None, stepSizeTd=stepSizeTd)\n\n \n p = 1000\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"H1\")\n #p += 20\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"H2\")\n #p += 20\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"H3\")\n #p += 20\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"H4\")\n #p += 20\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"H5\")\n #p += 20\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"H6\")\n #p += 20\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"H7\")\n #p += 20\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"H8\")\n #p += 20\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"H9\")\n #p += 20\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"H10\")\n #p += 20\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"H11\")\n #p += 20\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"H12\")\n #p += 20\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"ARMC\")\n #p += 20\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"Vertex\")\n #p += 20\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"EquatorialAscendant\")\n #p += 20\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"CoAscendant1\")\n #p += 20\n #success = PlanetaryCombinationsLibrary.\\\n # 
addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"CoAscendant2\")\n #p += 20\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"PolarAscendant\")\n #p += 20\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"Sun\")\n #p += 200\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"Moon\")\n #p += 200\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"Mercury\")\n #p += 200\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"Venus\")\n #p += 200\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"Mars\")\n #p += 200\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"Jupiter\")\n #p += 200\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"Saturn\")\n #p += 200\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"Uranus\")\n #p += 200\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"Neptune\")\n #p += 200\n #success = PlanetaryCombinationsLibrary.\\\n # addTimeMeasurementAndTiltedTextForNakshatraTransits(\n # pcdd, startDt, endDt, price=p, planetName=\"Pluto\")\n #p += 200\n\n\n\n #success = PlanetaryCombinationsLibrary.\\\n # addZeroDeclinationVerticalLines(\n # pcdd, startDt, endDt, highPrice, lowPrice, planetName=\"Venus\")\n \n #success = PlanetaryCombinationsLibrary.\\\n # addDeclinationVelocityPolarityChangeVerticalLines(\n # pcdd, startDt, endDt, highPrice, lowPrice, planetName=\"Venus\")\n \n #success = PlanetaryCombinationsLibrary.\\\n # addGeoLongitudeElongationVerticalLines(\n # pcdd, startDt, endDt, highPrice, lowPrice, planetName=\"Venus\")\n \n #success = PlanetaryCombinationsLibrary.\\\n # addGeoLongitudeElongationVerticalLines(\n # pcdd, startDt, endDt, highPrice, lowPrice, planetName=\"Mercury\")\n \n #success = PlanetaryCombinationsLibrary.\\\n # addContraparallelDeclinationAspectVerticalLines(\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # planet1Name=\"Venus\", planet2Name=\"Mars\")\n \n #success = PlanetaryCombinationsLibrary.\\\n # addParallelDeclinationAspectVerticalLines(\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # planet1Name=\"Venus\", planet2Name=\"Mars\")\n \n #success = PlanetaryCombinationsLibrary.\\\n # addPlanetOOBVerticalLines(\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # planetName=\"Venus\")\n \n #success = PlanetaryCombinationsLibrary.\\\n # addGeoLatitudeLines(\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # planetName=\"Venus\")\n #success = PlanetaryCombinationsLibrary.\\\n # addGeoLatitudeLines(\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # planetName=\"Jupiter\", stepSizeTd=datetime.timedelta(days=7))\n #success 
= PlanetaryCombinationsLibrary.\\\n # addGeoLatitudeLines(\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # planetName=\"Saturn\", stepSizeTd=datetime.timedelta(days=7))\n #success = PlanetaryCombinationsLibrary.\\\n # addGeoLatitudeLines(\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # planetName=\"Uranus\", stepSizeTd=datetime.timedelta(days=7))\n \n #success = PlanetaryCombinationsLibrary.\\\n # addZeroGeoLatitudeVerticalLines(\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # planetName=\"Venus\")\n\n #success = PlanetaryCombinationsLibrary.\\\n # addGeoLatitudeVelocityPolarityChangeVerticalLines(\n # pcdd, startDt, endDt, highPrice, lowPrice, planetName=\"Venus\")\n \n #success = PlanetaryCombinationsLibrary.\\\n # addContraparallelGeoLatitudeAspectVerticalLines(\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # planet1Name=\"Venus\", planet2Name=\"Mars\")\n \n #success = PlanetaryCombinationsLibrary.\\\n # addParallelGeoLatitudeAspectVerticalLines(\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # planet1Name=\"Venus\", planet2Name=\"Mars\")\n \n #success = PlanetaryCombinationsLibrary.\\\n # addPlanetLongitudeTraversalIncrementsVerticalLines(\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # \"Venus\", \"geocentric\", \"sidereal\", \n # planetEpocDt=datetime.datetime(year=1976, month=4, day=1,\n # hour=13, minute=0, second=0,\n # tzinfo=pytz.utc),\n # degreeIncrement=18)\n\n #success = PlanetaryCombinationsLibrary.\\\n # addPlanetLongitudeTraversalIncrementsVerticalLines(\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # \"Venus\", \"heliocentric\", \"sidereal\", \n # planetEpocDt=datetime.datetime(year=1970, month=3, day=21,\n # hour=0, minute=0, second=0,\n # tzinfo=pytz.utc),\n # degreeIncrement=30)\n\n #success = PlanetaryCombinationsLibrary.\\\n # addPlanetLongitudeTraversalIncrementsVerticalLines(\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # \"Sun\", \"geocentric\", \"tropical\", \n # planetEpocDt=datetime.datetime(year=1970, month=3, day=21,\n # hour=6, minute=0, second=0,\n # tzinfo=pytz.utc),\n # degreeIncrement=15)\n\n #success = PlanetaryCombinationsLibrary.\\\n # addGeoLongitudeVelocityPolarityChangeVerticalLines(\\\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # \"Mercury\")\n\n ############################################################################\n\n # Silver responds to this combination very well.\n if False:\n success = PlanetaryCombinationsLibrary.\\\n addGeoJupiterSaturn15xVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice)\n\n \n # Worth looking closer into, as it hits turns perfectly at some\n # places, and then it is a little off, and then it stops working,\n # then it starts working again. There is porbably some other\n # factor I need to take into account. Note that the 150-degree\n # aspect between these two seems to work pretty well.\n # \n # I should try to filter by season, or perhaps filter by whether\n # it is positive declination or negative declination, whether it\n # is positive latitude or negative latitude, or perhaps see how it\n # needs to be relative to some other planet (Sun or Pluto?). Also\n # check to see if there is a certain effect in a bull market\n # vs. 
bear market.\n # \n #\n if False:\n step = 15\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"sidereal\",\n \"Venus\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n \n # Did not work too well.\n #step = 40\n #start = 0\n #stop = 180\n #degreeDiff = start\n #while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n # success = PlanetaryCombinationsLibrary.\\\n # addLongitudeAspectVerticalLines(\\\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # \"Venus\", \"geocentric\", \"sidereal\",\n # \"Venus\", \"heliocentric\", \"sidereal\",\n # degreeDiff)\n # degreeDiff += step\n\n # This is definitely worth looking at more closely.\n if False:\n step = 18\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"sidereal\",\n \"Venus\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # Meh, some hits, some misses.\n if False:\n step = 12\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"sidereal\",\n \"Venus\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # It'll hit a turn perfectly 3 times or so, and then stop working\n # for a while, and then work again. Overall, this mostly has\n # misses though.\n if False:\n step = 11.25\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"sidereal\",\n \"Venus\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # Does not work so well.\n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"tropical\",\n \"Venus\", \"heliocentric\", \"tropical\",\n degreeDiff)\n degreeDiff += step\n\n # This marks some turns, so come back and investigate a little\n # more closely.\n if False:\n step = 15\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Mars\", \"geocentric\", \"sidereal\",\n \"Venus\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # Probably not worth studying further. \n # On this aspect, there are big moves sometimes (eyeballing it,\n # maybe about 10% of the time?). 
It's not clear that this defines\n # a change in trend though.\n if False:\n step = 15\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Mars\", \"geocentric\", \"tropical\",\n \"Venus\", \"geocentric\", \"tropical\",\n degreeDiff)\n degreeDiff += step\n\n # Generally, this does not work so well.\n # 108-degree aspects did show up a few times at tops and bottoms,\n # so maybe that particular aspect should be looked at more\n # closely.\n if False:\n step = 18\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Mars\", \"geocentric\", \"tropical\",\n \"Venus\", \"geocentric\", \"tropical\",\n degreeDiff)\n degreeDiff += step\n\n # Works well during very active markets.\n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Mars\", \"geocentric\", \"tropical\",\n \"Venus\", \"geocentric\", \"tropical\",\n degreeDiff)\n degreeDiff += step\n\n # Works well during very active markets.\n if False:\n step = 360 / 14.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Mars\", \"geocentric\", \"tropical\",\n \"Venus\", \"geocentric\", \"tropical\",\n degreeDiff)\n degreeDiff += step\n\n\n # Doesn't work very well.\n if False:\n step = 90\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"sidereal\",\n \"Saturn\", \"geocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # Good for some triggers.\n if False:\n step = 120\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"sidereal\",\n \"Saturn\", \"geocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # There are few turns that this coincides with, but overall, it\n # does not work that well.\n if False:\n step = 360 / 5.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"sidereal\",\n \"Saturn\", \"geocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # This cycle works pretty well!!! 
Worth investigating\n # further to add a trigger.\n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"sidereal\",\n \"Saturn\", \"geocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # This didn't work so well.\n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"sidereal\",\n \"Saturn\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # It does mark some turns, but it doesn't always work and the\n # turns are not very large.\n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"heliocentric\", \"sidereal\",\n \"Saturn\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # Overall, this doesn't work very well.\n if False:\n step = 360 / 5.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"sidereal\",\n \"Jupiter\", \"geocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # This marks some tops and bottoms. Worth investigating.\n if False:\n step = 90\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"sidereal\",\n \"Jupiter\", \"geocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # Meh... Not that strong. Not worth investigating further.\n if False:\n step = 120\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"sidereal\",\n \"Jupiter\", \"geocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # Worth investigating closer.\n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"sidereal\",\n \"Jupiter\", \"geocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # This is a somewhat consistent cycle in Silver, but a trigger\n # needs to be added. 
Investigate further.\n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"sidereal\",\n \"Jupiter\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # You can see it's affects, but it has a good amount of misses too.\n # Investigate further.\n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"heliocentric\", \"sidereal\",\n \"Jupiter\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # Works very well. I will need to apply a filter, but this works\n # very nicely.\n if False:\n step = 90\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"sidereal\",\n \"Uranus\", \"geocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # I think the 90-step works better than the 45-step.\n if False:\n step = 45\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"sidereal\",\n \"Uranus\", \"geocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # This works nicely too. Worth investigating further.\n if False:\n step = 120\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"sidereal\",\n \"Uranus\", \"geocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # This works pretty well. Worth investigating further.\n if False:\n step = 360 / 5.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"sidereal\",\n \"Uranus\", \"geocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # Works okay when active. 
It would need a trigger added though.\n # Using geo-geo is probably better, but I would need to check that.\n if False:\n step = 360 / 5.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"sidereal\",\n \"Uranus\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # Some tops and bottoms, but they are mostly few.\n # I think geocentric works better than helio to helio.\n if False:\n step = 360 / 5.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"heliocentric\", \"sidereal\",\n \"Uranus\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n\n # This times some turns, but overall, not very consistent.\n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"sidereal\",\n \"Uranus\", \"geocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # Does not work that well.\n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"sidereal\",\n \"Uranus\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # Shows up at some tops and bottoms. Probably worth investigating further.\n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"heliocentric\", \"sidereal\",\n \"Uranus\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n\n # This is a cycle in silver. Works very nicely when active.\n # Trigger may need to be added.\n if False:\n step = 360 / 5.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"tropical\",\n \"Pluto\", \"geocentric\", \"tropical\",\n degreeDiff)\n degreeDiff += step\n\n # This works pretty well; just needs triggers defined for it.\n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"tropical\",\n \"Pluto\", \"geocentric\", \"tropical\",\n degreeDiff)\n degreeDiff += step\n\n # This works pretty well too. 
Just needs triggers defined for it.\n # Need to determine which is better for this aspect set, Venus\n # aspecting Geo Pluto or Venus aspecting Helio Pluto?\n # rluu: Just plot both to be aware of each.\n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"tropical\",\n \"Pluto\", \"heliocentric\", \"tropical\",\n degreeDiff)\n degreeDiff += step\n\n # This combination may just be a trigger and not an actual cycle.\n #step = 15\n #start = 0\n #stop = 180\n #degreeDiff = start\n #while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n # success = PlanetaryCombinationsLibrary.\\\n # addLongitudeAspectVerticalLines(\\\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # \"Venus\", \"geocentric\", \"sidereal\",\n # \"Neptune\", \"heliocentric\", \"sidereal\",\n # degreeDiff)\n # degreeDiff += step\n\n\n #step = 15\n #start = 0\n #stop = 180\n #degreeDiff = start\n #while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n # success = PlanetaryCombinationsLibrary.\\\n # addLongitudeAspectVerticalLines(\\\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # \"Venus\", \"geocentric\", \"sidereal\",\n # \"Neptune\", \"heliocentric\", \"sidereal\",\n # degreeDiff)\n # degreeDiff += step\n \n # Worth investigating further. The trigger needs to be defined.\n #step = 15\n #start = 0\n #stop = 180\n #degreeDiff = start\n #while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n # success = PlanetaryCombinationsLibrary.\\\n # addLongitudeAspectVerticalLines(\\\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # \"Mars\", \"geocentric\", \"sidereal\",\n # \"Pluto\", \"geocentric\", \"sidereal\",\n # degreeDiff)\n # degreeDiff += step\n\n\n # \n #step = 15\n #start = 0\n #stop = 180\n #degreeDiff = start\n #while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n # success = PlanetaryCombinationsLibrary.\\\n # addLongitudeAspectVerticalLines(\\\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # \"Pluto\", \"heliocentric\", \"sidereal\",\n # \"Mars\", \"geocentric\", \"sidereal\",\n # degreeDiff)\n # degreeDiff += step\n\n\n # Had no effect.\n #step = 15\n #start = 0\n #stop = 180\n #degreeDiff = start\n #while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n # success = PlanetaryCombinationsLibrary.\\\n # addLongitudeAspectVerticalLines(\\\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # \"Uranus\", \"geocentric\", \"tropical\",\n # \"Neptune\", \"geocentric\", \"tropical\",\n # degreeDiff)\n # degreeDiff += step\n\n\n # This catches some large and medium turns nicely, but also has\n # some smaller turns and misses. Is this just a triggering\n # combination? Investigate to see if an additional trigger could\n # be utilized.\n #step = 15\n #start = 0\n #stop = 180\n #degreeDiff = start\n #while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n # success = PlanetaryCombinationsLibrary.\\\n # addLongitudeAspectVerticalLines(\\\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # \"Mars\", \"geocentric\", \"tropical\",\n # \"Isis\", \"geocentric\", \"tropical\",\n # degreeDiff)\n # degreeDiff += step\n \n\n # This catches some large and medium turns nicely, but also has\n # some smaller turns and misses. Is this just a triggering\n # combination? 
Investigate to see if an additional trigger could\n # be utilized.\n #step = 15\n #start = 0\n #stop = 180\n #degreeDiff = start\n #while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n # success = PlanetaryCombinationsLibrary.\\\n # addLongitudeAspectVerticalLines(\\\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # \"Mars\", \"geocentric\", \"tropical\",\n # \"Isis\", \"heliocentric\", \"tropical\",\n # degreeDiff)\n # degreeDiff += step\n\n\n # This works well from about mid 2003 to end of 2008 (making well\n # defined turns). Before 2003, it is either really sloppy or\n # doesn't catch turns. Intervals between each aspect ranges from about\n # 4 months to ~2.25 years.\n #step = 7.5\n #start = 0\n #stop = 180\n #degreeDiff = start\n #while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n # success = PlanetaryCombinationsLibrary.\\\n # addLongitudeAspectVerticalLines(\\\n # pcdd, startDt, endDt, highPrice, lowPrice,\n # \"Neptune\", \"geocentric\", \"tropical\",\n # \"Isis\", \"geocentric\", \"tropical\",\n # degreeDiff)\n # degreeDiff += step\n\n\n # Works pretty decently.\n if False:\n step = 90\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Mercury\", \"geocentric\", \"sidereal\",\n \"Pluto\", \"geocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # Doesn't work that well.\n if False:\n step = 120\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Mercury\", \"geocentric\", \"sidereal\",\n \"Pluto\", \"geocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # Coincided with some turns, but overall, this is not so good.\n if False:\n step = 360 / 5.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Mercury\", \"geocentric\", \"sidereal\",\n \"Pluto\", \"geocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n \n # Coincided with some turns, but overall, this is not so good.\n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Mercury\", \"geocentric\", \"sidereal\",\n \"Pluto\", \"geocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n \n # This might be a very short cycle in silver.\n # At times it works really well, and at other times, it is not so good.\n # Maybe this is just a triggering combination.\n if False:\n step = 90\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Mercury\", \"heliocentric\", \"sidereal\",\n \"Pluto\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # This might be a very short cycle in silver.\n # At times it works really well, and at other times, it is not so good.\n # Maybe this is just a 
triggering combination.\n if False:\n step = 120\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Mercury\", \"heliocentric\", \"sidereal\",\n \"Pluto\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # There are a lot of aspects created with this set, but it may be\n # worth exploring as a trigger since some hit the highs and lows.\n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Mercury\", \"heliocentric\", \"tropical\",\n \"Pluto\", \"heliocentric\", \"tropical\",\n degreeDiff)\n degreeDiff += step\n\n # There are some hits that coincide with turns, but \n # generally this does not work too well.\n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Mercury\", \"geocentric\", \"tropical\",\n \"Pluto\", \"heliocentric\", \"tropical\",\n degreeDiff)\n degreeDiff += step\n\n # Generally, this does not work that well.\n if False:\n step = 90\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"heliocentric\", \"sidereal\",\n \"Pluto\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # Investigate further. 
This may be an important cycle for silver!\n # You can see that prices either accelerate into the 120 degree\n # aspect, or accelerate after the 120 degree aspect, whenever this\n # aspect (planet combination) is active.\n # For the steps of 120, sometimes they mark turns (if dasa is active?),\n # and other times, nothing.\n if False:\n step = 120\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"heliocentric\", \"sidereal\",\n \"Pluto\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # This looks to be an important combination in silver, although only\n # when it is 'active'.\n if False:\n step = 90\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Earth\", \"heliocentric\", \"sidereal\",\n \"Pluto\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # This looks to be an important combination in silver, although only\n # when it is 'active'.\n if False:\n step = 120\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Earth\", \"heliocentric\", \"sidereal\",\n \"Pluto\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # This is definitely worth keeping an eye on. Especially when\n # hits are dead on, like in the 1997 to 1999 period. It marks\n # very good 'time' for when a move is up.\n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Earth\", \"heliocentric\", \"tropical\",\n \"Pluto\", \"heliocentric\", \"tropical\",\n degreeDiff)\n degreeDiff += step\n\n # Does not work that well.\n if False:\n step = 90\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Mars\", \"heliocentric\", \"sidereal\",\n \"Pluto\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # Overall it does not seem to work too consistently, but some of\n # these trines, when they do catch, it is a very big turns.\n if False:\n step = 120\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Mars\", \"heliocentric\", \"sidereal\",\n \"Pluto\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # Generally doesn't work too well. 
There were a few hits, but\n # overall a low percentage.\n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Mars\", \"heliocentric\", \"tropical\",\n \"Pluto\", \"heliocentric\", \"tropical\",\n degreeDiff)\n degreeDiff += step\n\n # Does not coincide with any notable turns.\n if False:\n step = 90\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Jupiter\", \"heliocentric\", \"sidereal\",\n \"Pluto\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # Does not coincide with any notable turns.\n if False:\n step = 120\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Jupiter\", \"heliocentric\", \"sidereal\",\n \"Pluto\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # Does not coincide with any notable turns.\n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Jupiter\", \"heliocentric\", \"tropical\",\n \"Pluto\", \"heliocentric\", \"tropical\",\n degreeDiff)\n degreeDiff += step\n\n # Does not coincide with any notable turns.\n if False:\n step = 15\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Uranus\", \"heliocentric\", \"sidereal\",\n \"Pluto\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # Definitely a trigger combination for large turns, when a cycle is due.\n if False:\n step = 90\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"heliocentric\", \"sidereal\",\n \"Isis\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n \n # Definitely a trigger combination for large turns, when a cycle is due.\n if False:\n step = 120\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"heliocentric\", \"sidereal\",\n \"Isis\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # This works as a cycle so-so. It does mark turns and also\n # periods when 'time' is up. The negative is that the time ranges\n # that it does not work is pretty wide. Although this cycle does\n # not appear to be a very strong. 
Worth looking at again.\n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"heliocentric\", \"sidereal\",\n \"Isis\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # This marks some tops and bottoms, and also periods when 'time' is up.\n # Worth investigating further.\n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"tropical\",\n \"Isis\", \"heliocentric\", \"tropical\",\n degreeDiff)\n degreeDiff += step\n\n # At times it looks like there is something to this, but then\n # again it is also quite sloppy... so much so that there may not\n # be something here at all. The turns it catches are not really\n # major turns. At times it does not mark any turns at all.\n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"geocentric\", \"tropical\",\n \"Isis\", \"geocentric\", \"tropical\",\n degreeDiff)\n degreeDiff += step\n\n # In this, there are places where it marks that 'time' is up, but\n # there are also a lot of misses. Worth looking at again later.\n # I might ask, what is the longitude difference between\n # heliocentric and geocentric Transpluto? \n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"heliocentric\", \"tropical\",\n \"Isis\", \"geocentric\", \"tropical\",\n degreeDiff)\n degreeDiff += step\n \n # Important (trigger?) combination for silver.\n if False:\n step = 90\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Earth\", \"heliocentric\", \"sidereal\",\n \"Isis\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # This is a trigger combination that doesn't work very often, and\n # when it does it coincides with turns that are very weak. 
It's\n # probably better not to utilize this one.\n if False:\n step = 120\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Earth\", \"heliocentric\", \"sidereal\",\n \"Isis\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # This marks some spots when 'time' is up, but there are also\n # a lot of places where it did not clearly have a move afterwards.\n # Probably not worth investigating too much further.\n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Earth\", \"heliocentric\", \"tropical\",\n \"Isis\", \"heliocentric\", \"tropical\",\n degreeDiff)\n degreeDiff += step\n\n # Worth keeping this on the radar. This marks powerful energy\n # energy surges when the market is trending. There are some misses though.\n # Worth investigating further. \n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Earth\", \"heliocentric\", \"tropical\",\n \"Isis\", \"geocentric\", \"tropical\",\n degreeDiff)\n degreeDiff += step\n \n # Important trigger combination for silver.\n # Some big reversals are marked with this combination.\n # An additional trigger may be needed.\n if False:\n step = 90\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Mars\", \"heliocentric\", \"sidereal\",\n \"Isis\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # Eye-balling this one, about 25% occurances coincided with turns.\n # Of the ones that did coincide with a turn, about half were\n # close to exact, and about half probably need an additional trigger.\n if False:\n step = 120\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Mars\", \"heliocentric\", \"sidereal\",\n \"Isis\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n # Eye-balling this one, maybe about 5% coincided with tops or\n # bottoms. The action caused by this combination isn't very clear\n # at all; overall it is kind of weak. Probably not worth\n # investigating into this one.\n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Mars\", \"heliocentric\", \"tropical\",\n \"Isis\", \"heliocentric\", \"tropical\",\n degreeDiff)\n degreeDiff += step\n\n # When the market is active and trending, this will coincide with\n # some tops and bottoms (eye-balling: maybe 20% of them while\n # trending), and maybe about 5% overall. 
Probably not worth\n # investigating into this one further.\n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Mars\", \"heliocentric\", \"tropical\",\n \"Isis\", \"geocentric\", \"tropical\",\n degreeDiff)\n degreeDiff += step\n\n # Coincides with some nice tops and bottoms when silver is trending.\n # This is probably a trigger. Worth investigating closer.\n if False:\n step = 360 / 7.0\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Mars\", \"geocentric\", \"tropical\",\n \"Isis\", \"heliocentric\", \"tropical\",\n degreeDiff)\n degreeDiff += step\n \n ############################################################################\n # Heliocentric synodic combinations with Earth.\n #\n # For these, count the number of iterations. For Jupiter, I\n # noticed that 24 synodic iterations later, there is a major high\n # coinciding with it again. Also, the synodic Mars with Earth hit\n # once at the same time as synodic Jupiter with Earth, and that\n # was a huge high. There is probably value to looking at all\n # these at once to see if any light can be seen as to how these\n # synodics (or cycles) work with each other.\n #\n\n \n # If active, it coincides with sharp turns. If not active, then\n # there will need to be an orb and another trigger, but you can\n # definitely seen the rhythm of Mercury. Something that may be\n # helpful to watch to see patterns as they develop.\n if False:\n step = 360\n start = 0\n stop = 0\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Mercury\", \"heliocentric\", \"sidereal\",\n \"Earth\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n if False:\n step = 360\n start = 0\n stop = 0\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Venus\", \"heliocentric\", \"sidereal\",\n \"Earth\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n \n if False:\n step = 360\n start = 0\n stop = 0\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Mars\", \"heliocentric\", \"sidereal\",\n \"Earth\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n \n if False:\n step = 360\n start = 0\n stop = 0\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Jupiter\", \"heliocentric\", \"sidereal\",\n \"Earth\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n \n if False:\n step = 360\n start = 0\n stop = 0\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n 
addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Saturn\", \"heliocentric\", \"sidereal\",\n \"Earth\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n \n if False:\n step = 360\n start = 0\n stop = 0\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Uranus\", \"heliocentric\", \"sidereal\",\n \"Earth\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n \n if False:\n step = 360\n start = 0\n stop = 0\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Neptune\", \"heliocentric\", \"sidereal\",\n \"Earth\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n \n if False:\n step = 360\n start = 0\n stop = 0\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Pluto\", \"heliocentric\", \"sidereal\",\n \"Earth\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n \n if False:\n step = 360\n start = 0\n stop = 0\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Chiron\", \"heliocentric\", \"sidereal\",\n \"Earth\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n \n if False:\n step = 360\n start = 0\n stop = 0\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n success = PlanetaryCombinationsLibrary.\\\n addLongitudeAspectVerticalLines(\\\n pcdd, startDt, endDt, highPrice, lowPrice,\n \"Isis\", \"heliocentric\", \"sidereal\",\n \"Earth\", \"heliocentric\", \"sidereal\",\n degreeDiff)\n degreeDiff += step\n\n ############################################################################\n\n # Testing new functions for longitude aspect timestamps.\n if False:\n aspectGroup = []\n step = 180\n start = 0\n stop = 180\n degreeDiff = start\n while degreeDiff < stop or Util.fuzzyIsEqual(degreeDiff, stop):\n aspectGroup.append(degreeDiff)\n degreeDiff += step\n\n planet1ParamsList = [(\"Venus\", \"geocentric\", \"sidereal\")]\n planet2ParamsList = [(\"Uranus\", \"geocentric\", \"sidereal\")]\n uniDirectionalAspectsFlag = True\n \n for aspect in aspectGroup:\n degreeDifference = aspect\n \n # Get the timestamps of the aspect.\n timestamps = \\\n PlanetaryCombinationsLibrary.getLongitudeAspectTimestamps(\\\n pcdd, startDt, endDt,\n planet1ParamsList,\n planet2ParamsList,\n degreeDifference,\n uniDirectionalAspectsFlag)\n \n # Get the tag str for the aspect.\n tag = \\\n PlanetaryCombinationsLibrary.getTagNameForLongitudeAspect(\\\n planet1ParamsList,\n planet2ParamsList,\n degreeDifference,\n uniDirectionalAspectsFlag)\n \n # Get the color to apply.\n from astrologychart import AstrologyUtils\n color = AstrologyUtils.\\\n getForegroundColorForPlanetName(planet1ParamsList[0][0])\n \n # Draw the aspects.\n for dt in timestamps:\n PlanetaryCombinationsLibrary.addVerticalLine(\\\n pcdd, dt, highPrice, lowPrice, tag, color)\n \n log.info(\"Added {} artifacts for aspect {} degrees.\".\\\n 
format(len(timestamps), degreeDifference))\n success = True\n \n ############################################################################\n\n if success == True:\n log.debug(\"Success!\")\n rv = 0\n else:\n log.debug(\"Failure!\")\n rv = 1\n\n return rv" ]
[ "0.553893", "0.5346823", "0.52831537", "0.50846463", "0.49604708", "0.48745608", "0.48555076", "0.485055", "0.4848358", "0.4832407", "0.47853115", "0.47777563", "0.47689658", "0.47675616", "0.47572455", "0.474961", "0.4736425", "0.47201127", "0.4714435", "0.47010985", "0.4685433", "0.46528113", "0.46408987", "0.4638567", "0.46346027", "0.4631739", "0.4628875", "0.46147752", "0.46054053", "0.4568641", "0.45656216", "0.45619163", "0.45574227", "0.4546786", "0.453835", "0.45282614", "0.4522631", "0.45071694", "0.44983912", "0.44935527", "0.44715527", "0.4459354", "0.44416964", "0.44356045", "0.44203854", "0.4419478", "0.4412664", "0.44124287", "0.4408247", "0.44071567", "0.4399507", "0.43963364", "0.43958563", "0.4392358", "0.43911314", "0.43886438", "0.4385025", "0.4377343", "0.4376979", "0.43749174", "0.43748826", "0.43651524", "0.4360921", "0.43583032", "0.43571594", "0.435681", "0.43482605", "0.43456304", "0.4339003", "0.43332773", "0.43316004", "0.43305308", "0.43208477", "0.43134382", "0.43108228", "0.4309269", "0.43083853", "0.43032598", "0.43032458", "0.43007267", "0.4300564", "0.42981988", "0.42973492", "0.42962947", "0.4294656", "0.4289444", "0.42884627", "0.4288308", "0.42848375", "0.4283453", "0.4278004", "0.4277759", "0.4272443", "0.42679307", "0.42639318", "0.42593378", "0.42575863", "0.42572442", "0.42544004", "0.42505157" ]
0.5686502
0
Add an edge to the DCEL if it doesn't already exist, otherwise return the existing edge.
def add_edge(self, edge): try: edge_idx = self.edges.index(edge) return self.edges[edge_idx] except Exception: self.edges.append(edge) return edge
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_edge(self, edge):\n\n add_egde = True\n for edge_this in self.edges:\n if edge_this == edge:\n add_egde = False\n\n if add_egde:\n self.edges.append(edge)\n\n return self", "def add_edge(self, edge):\n edge = set(edge)\n (vertex1, vertex2) = tuple(edge)\n if vertex1 in self.graph_dict:\n self.graph_dict[vertex1].append(vertex2)\n else:\n self.graph_dict[vertex1] = [vertex2]\n return edge", "def add_edge(self, edge):\n if(self.has_edge(edge) == 0):\n self.__graph_dict[edge[0]].append(edge[1])", "def add_edge(self, edge: e.Edge) -> None:\n if edge not in self.edges:\n self.edges.append(edge)\n self.num_edges = self.num_edges + 1", "def add_edge(self, edge):\n assert edge not in self.edges\n self.edges.append(edge)", "def add_edge(self,_check_existing=True,**kwargs):\n j=None\n if '_index' in kwargs:\n j=kwargs.pop('_index')\n if j==len(self.edges):\n # this is the index we'd get anyway.\n j=None\n else:\n assert len(self.edges)>j\n assert self.edges[j]['deleted']\n\n if _check_existing:\n j_exists=self.nodes_to_edge(*kwargs['nodes'])\n if j_exists is not None:\n raise GridException(\"Edge already exists\")\n \n if j is None:\n e=np.zeros( (),dtype=self.edge_dtype)\n self.edges=array_append(self.edges,e)\n j=len(self.edges)-1\n\n # default values\n self.edges[j]['cells'][:]=-1\n self.edges[j]['deleted']=False\n\n for k,v in six.iteritems(kwargs):\n self.edges[k][j]=v\n\n # most basic checks on edge validity:\n if self.edges[j]['nodes'][0]==self.edges[j]['nodes'][1]:\n raise self.InvalidEdge('duplicate nodes')\n\n if self._node_to_edges is not None:\n n1,n2=self.edges['nodes'][j]\n self._node_to_edges[n1].append(j)\n self._node_to_edges[n2].append(j)\n\n self.push_op(self.unadd_edge,j)\n return j", "def add_edge(self, edge):\n\n if edge.uuid is None:\n edge.uuid = self._generate_uuid()\n\n if edge.uuid in self._edges:\n error_str = \"Trying to add an already existing edge with uuid: \"\\\n + str(edge.uuid)\n raise KeyError(error_str)\n\n self._edges[edge.uuid] = Edge.from_edge(edge)\n\n return edge.uuid", "def add_edge(self, edge, directed=False, auto=False):\n (v1, v2) = edge.get_id()\n if v1 in self.vertices.keys() and v2 in self.vertices.keys():\n if directed:\n if auto:\n self.edges[edge.get_id()] = edge\n else:\n if v1 != v2:\n self.edges[edge.get_id()] = edge\n else:\n if self.edges.get((v2, v1)) is None:\n if auto:\n self.edges[edge.get_id()] = edge\n else:\n if v1 != v2:\n self.edges[edge.get_id()] = edge", "def add_edge(self, edge):\n edge = set(edge)\n (vertex1, vertex2) = tuple(edge)\n if vertex1 in self.__graph_dict:\n self.__graph_dict[vertex1].append(vertex2)\n else:\n self.__graph_dict[vertex1] = [vertex2]", "def add_edge(self, edge):\n edge = set(edge)\n (vertex1, vertex2) = tuple(edge)\n if vertex1 in self.graph_dict:\n self.graph_dict[vertex1].append(vertex2)\n else:\n self.graph_dict[vertex1] = [vertex2]", "def addEdge(self, edge):\n Digraph.addEdge(self, edge)\n rev = Edge(edge.getDestination(), edge.getSource())\n Digraph.addEdge(self, rev)", "def add_edge(self, edge):\n self[edge[0]][edge[1]] = edge\n self[edge[1]][edge[0]] = edge", "def _add_edge(self, a, b):\n e = Edge2(a, b)\n i = bisect(self.edges, e)\n \n # if edge between these vertices exists just return it\n if len(self.edges) > i and self.edges[i] == e:\n return self.edges[i]\n \n # otherwise add new edge in sorted position and return it\n self.edges.insert(i, e)\n return e", "def add_edge(self, edge):\r\n edge = set(edge)\r\n (vertex1, vertex2) = tuple(edge)\r\n \r\n if vertex1 in 
self.__graph_dict.keys() and vertex2 in self.__graph_dict.keys():\r\n if vertex2 in self.__graph_dict[vertex1] and vertex1 in self.__graph_dict[vertex2]:\r\n return\r\n self.__graph_dict[vertex1].add(vertex2)\r\n self.__graph_dict[vertex2].add(vertex1)\r\n elif vertex1 not in self.__graph_dict.keys() and vertex2 in self.__graph_dict.keys():\r\n self.__graph_dict[vertex1] = {vertex2}\r\n self.__graph_dict[vertex2].add(vertex1)\r\n elif vertex1 in self.__graph_dict.keys() and vertex2 not in self.__graph_dict.keys():\r\n self.__graph_dict[vertex2] = {vertex1}\r\n self.__graph_dict[vertex1].add(vertex2)\r\n else:\r\n self.__graph_dict[vertex1] = {vertex2}\r\n self.__graph_dict[vertex2] = {vertex1}", "def add_edge(self, edge):\n src = edge.get_source()\n dest = edge.get_destination()\n #weightEdge = WeightedEdge(src, dest, edge.get_total_distance(), edge.get_outdoor_distance())\n if not (src in self.edges and dest in self.edges):\n raise ValueError('Node not in graph')\n self.edges[src].append(dest)\n #self.edges[src].append(weightEdge)", "def addEdge(self, e):\n v = e.either()\n w = e.other(v)\n self._validateVertex(v)\n self._validateVertex(w)\n self._adj[v].add(e)\n self._adj[w].add(e)\n self._E += 1", "def addEdge(self, edge):\n src = edge.getSource()\n dest = edge.getDestination()\n if not (src in self.edges and dest in self.edges):\n raise ValueError('Node not in graph')\n self.edges[src].append(dest)", "def add_edge(self, ed):\n self.edge.append(ed)\n self.update_node2edge()", "def add_edge(self, e):\n a, b = e\n self[a][b] = e\n self[b][a] = e", "def addEdge(self, edge: Edge):\n self.edges.append(edge)", "def add_edge(self, edge_name, edge, overwrite=False):\n if (edge_name in self.edges) and (not overwrite):\n raise ValueError(\"Attempted to overwrite the edge \" + edge_name + \".\")\n else:\n self.edges[edge_name] = edge\n self.build_order += [edge_name]", "def add_neighbor(self, edge: \"Edge\") -> None:\r\n if edge is None or (edge.source != self and edge.target != self):\r\n return\r\n\r\n if edge.source == self:\r\n other: Node = edge.target\r\n elif edge.target == self:\r\n other: Node = edge.source\r\n else:\r\n raise ValueError(\"Tried to add a neighbor with an invalid edge.\")\r\n\r\n edge_key: Tuple(int, int) = edge.key\r\n\r\n # The graph is considered undirected, check neighbor existence accordingly.\r\n if self._neighbors.get(edge_key) or self._neighbors.get((edge_key[1], edge_key[0])):\r\n return # The neighbor is already added.\r\n\r\n self._neighbors[edge_key] = edge\r\n self.dispatch_event(NeighborAddedEvent(other))", "def add_edge(self, e):\n x = min(e)\n y = max(e)\n if x not in self._vertices:\n self.add_vertex(x)\n if y not in self._vertices:\n self.add_vertex(y)\n self._edges.add( (x, y) )", "def add_edge(self, edge=None):\n src_key, dest_key = (edge['src_key'], edge['dest_key'])\n if dest_key is self.ROOT_TASK_KEY:\n raise Exception(\"Root task can not be an edge dest\")\n edge_key = (src_key, dest_key)\n self._edges[edge_key] = edge\n self._edges_by_key[src_key]['outgoing'][edge_key] = edge\n self._edges_by_key[dest_key]['incoming'][edge_key] = edge", "def add_edge(self, edge):\n edge = set(edge)\n (vertex1, vertex2) = tuple(edge)\n if vertex1 not in self.__graph_dict:\n self.__graph_dict[vertex1] = []\n dbg_str = \"Vertex being initialized ..\" + str(vertex1)\n # logging.debug(dbg_str)\n if vertex2 not in self.__graph_dict:\n self.__graph_dict[vertex2] = []\n dbg_str = \"Vertex being initialized ..\" + str(vertex2)\n # logging.debug(dbg_str)\n if vertex2 not in 
self.__graph_dict[vertex1]:\n self.__graph_dict[vertex1].append(vertex2)\n dbg_str = \"Appending .. \" + str(vertex2), \"to ->\" +str(vertex1)\n # logging.debug(dbg_str)\n\n if vertex1 not in self.__graph_dict[vertex2]:\n self.__graph_dict[vertex2].append(vertex1)\n dbg_str = \"Appending .. \" + str(vertex1), \"to ->\" +str(vertex2)\n # logging.debug(dbg_str)", "def add_edge(self, e):\n v, w = e\n self[v][w] = e\n self[w][v] = e", "def addEdge(self, edge):\n\n startVertex = edge.startVertex\n endVertex = edge.endVertex\n\n startVertexNumber = startVertex.vertexNumber\n endVertexNumber = endVertex.vertexNumber\n \n vertexIndex = self.vertexIndex\n parentIndex = self.parentIndex\n parentEdgeIndex = self.parentEdgeIndex\n\n if startVertexNumber == endVertexNumber:\n raise EdgeError(startVertexNumber, endVertexNumber, ErrorMessages.noSelfLoops)\n\n try:\n parentIndex[startVertexNumber].index(endVertexNumber)\n raise EdgeError(startVertexNumber, endVertexNumber, ErrorMessages.edgeAlreadyExists)\n except (ValueError, KeyError):\n self.__lastEdgeNumber += 1\n self.edgeIndex[self.__lastEdgeNumber] = edge\n \n if startVertexNumber not in vertexIndex:\n vertexIndex[startVertexNumber] = startVertex\n\n if endVertexNumber not in vertexIndex:\n vertexIndex[endVertexNumber] = endVertex\n\n if startVertexNumber not in parentIndex:\n parentIndex[startVertexNumber] = [endVertexNumber]\n else:\n parentIndex[startVertexNumber].append(endVertexNumber)\n\n if endVertexNumber not in parentIndex:\n parentIndex[endVertexNumber] = [startVertexNumber]\n else:\n parentIndex[endVertexNumber].append(startVertexNumber)\n\n if startVertexNumber not in parentEdgeIndex:\n parentEdgeIndex[startVertexNumber] = [[endVertexNumber, self.__lastEdgeNumber]]\n else:\n parentEdgeIndex[startVertexNumber].append([endVertexNumber, self.__lastEdgeNumber])\n\n if endVertexNumber not in parentEdgeIndex:\n parentEdgeIndex[endVertexNumber] = [[startVertexNumber, self.__lastEdgeNumber]]\n else:\n parentEdgeIndex[endVertexNumber].append([startVertexNumber, self.__lastEdgeNumber])\n \n try:\n self.__degreeCount[startVertexNumber] += 1\n except KeyError:\n self.__degreeCount[startVertexNumber] = 1\n \n try:\n self.__degreeCount[endVertexNumber] += 1\n except KeyError:\n self.__degreeCount[endVertexNumber] = 1", "def _add_edge(self, graph: Graph, vertex1: Vertex, vertex2: Vertex) \\\n -> None:\n new_edge = Edge(vertex1, vertex2)\n graph.add(new_edge)", "def addEdge(self, startNode, endNode):\n if self.directedOrUndirected == 'undirected':\n # no need to check if edge already exists because we're using\n # defaultdict\n\n self.graph[startNode].append(endNode)\n self.graph[endNode].append(startNode)\n else:\n self.graph[startNode].append(endNode)", "def add_edge(self, current_id=None, in_vertex_id=None, out_vertex_id=None, label=None, properties=None):\n if current_id is None:\n done = False\n while not done:\n next_id = self.get_next_id()\n\n if next_id not in self.edges:\n current_id = next_id\n done = True\n else:\n if current_id in self.edges:\n raise Exception('Edge with ID Already Exist')\n\n in_vertex = self.vertices.get(in_vertex_id)\n out_vertex = self.vertices.get(out_vertex_id)\n\n if out_vertex is None or in_vertex is None:\n raise Exception('In_vertex or out_vertex not found')\n\n current_edge = Edge(self, current_id,\n label=label,\n in_vertex=in_vertex,\n out_vertex=out_vertex,\n properties=properties)\n\n self.edges[current_id] = current_edge\n in_vertex.add_out_edge(label, current_edge)\n out_vertex.add_in_edge(label, 
current_edge)\n return current_edge", "def add_edge(i, j):\n if (i, j) in edges or (j, i) in edges:\n # Si ya esta agregado en la lista no agrega nada\n return\n edges.add( (i, j) )\n edge_points.append(points[ [i, j] ])", "def add_edge(self, edge):\n\n if len(self.__adjacency_matrix) == 0: # create an empty_adjacency_matrix & adjacency_matrix_dictionary if they doesnt exist yet\n for dimension in range(1, self.__num_nodes + 1):\n self.__adjacency_matrix.append([0] * self.__num_nodes) # size of matrix is (num_nodes) * (num_nodes)\n for i in range(0, self.__num_nodes):\n self.__adjacency_matrix_dictionary[self.__nodes[i]] = {} # add empty dictionary elements for each node\n\n self.__adjacency_matrix_dictionary[self.__nodes[edge.get_node_a().get_id()]][\n self.__nodes[edge.get_node_b().get_id()]] = edge\n self.__adjacency_matrix_dictionary[self.__nodes[edge.get_node_b().get_id()]][\n self.__nodes[edge.get_node_a().get_id()]] = edge # add edge to both directions\n\n self.__edges.append(edge)\n self.__num_edges += 1", "def addEdge(self, x, y,z):\n if self.isEdge(x,y)==False:\n self.dictOut[x].append(y)\n self.dictIn[y].append(x)\n self.listOfEdges.append((x, y))\n self.dictCosts[(x,y)]=z\n else:\n print(\"Allready is an edge between them!\")", "def add_edge(self, v1, v2):\n # Check if they exist\n # if v1 in self.vertices and v2 in self.vertices:\n if v1 in self.vertices:\n # Add the edge\n self.vertices[v1].add(v2)\n else:\n print(f\"ERROR ADDING EDGE between {v1} and {v2} : Vertex not found\")", "def add_directed_edge(self, v1, v2):\n if v1 in self.vertices:\n self.vertices[v1].edges.add(v2)\n else:\n raise IndexError(\"That vertex does not exist!\")", "def save_edge(self, edge: Edge) -> Edge:", "def add_edge(self, v1, v2):\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices[v1].edges.add(v2)\n self.vertices[v2].edges.add(v1)\n else:\n raise IndexError(\"That vertex does not exist!\")", "def addEdge(self,x,y):\n\t\tself._dict[x].append(y)", "def add_edge(self, v1, v2):\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices[v1].add(v2)\n else:\n print(\"ERROR ADDING EDGE: Vrtes not found\")", "def addEdge(self,x,y):\n\t\tself._dictOut[x].append(y)\n\t\tself._dictIn[y].append(x)", "def get_edge(self, source: Node, target: Node) -> Optional[Edge]:\r\n return self.get_edge_by_index(source.index, target.index)", "def addEdge(self,x,y):\n\t\tself._matr[x][y] = True", "def add_edge_field(self,name,data,on_exists='fail'):\n if name in np.dtype(self.edge_dtype).names:\n if on_exists == 'fail':\n raise GridException(\"Edge field %s already exists\"%name)\n elif on_exists == 'pass':\n return\n elif on_exists == 'overwrite':\n self.edges[name] = data\n else:\n self.edges=recarray_add_fields(self.edges,\n [(name,data)])\n self.edge_dtype=self.edges.dtype", "def AddEdge(self, *args):\n return _BRepAlgo.BRepAlgo_Loop_AddEdge(self, *args)", "def addEdge(this, a, b):\n if not a in this.m:\n this.m[a]=set()\n this.m[a].add(b)", "def add_edge(self, position):\n raise NotImplementedError()", "def add_edge(self, v1, v2):\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices[v1].add(v2)\n else:\n raise IndexError(\"That vertex does not exist!\")", "def addEdge(self, x, y):\n self._dictOut[x].append(y)\n self._dictIn[y].append(x)", "def edge(cls, edge):\n return cls(Lnk.EDGE, int(edge))", "def add_edge(self, v1, v2):\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices[v1].add(v2)\n else:\n raise IndexError('That vertex does not exist')", "def add_edge(self, val1, 
val2):\n if val1 not in self._g:\n self.add_node(val1)\n if val2 not in self._g:\n self.add_node(val2)\n if val2 == val1:\n raise ValueError('Cannot have a self-referential edge.')\n if val2 in self._g[val1]:\n self._g[val1].remove(val2)\n self._g[val1].append(val2)", "def add_edge(self, v1, v2):\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices[v1].add(v2)\n else:\n raise IndexError('nonexistent vertex/node')", "def add_edge(self, source_vertex, destination_vertex):\n self.vertex_edge[source_vertex].append(destination_vertex)\n new_edge = GraphEdge(source_vertex, destination_vertex)\n if destination_vertex not in self.vertex_edge:\n self.edges.append(new_edge)", "def get_edge(self, node, other_node):\n # if there exists an edge between the two input nodes\n if self.contains_edge(node, other_node):\n return \\\n {\n edge\n for edge in self.get_edges()\n if {\n node.get_name(),\n other_node.get_name()\n } ==\n {\n edge.get_first_incident_node().get_name(),\n edge.get_second_incident_node().get_name()\n }\n }.pop() # return the edge\n # otherwise raise an exception\n raise Exception(\"Invalid request: desired edge does not exist.\")", "def add_edge (self, src, dst, link):\n raise NotImplementedError", "def get_edge(self, edge):\n if len(edge) != 2:\n raise TypeError\n try:\n return self[edge[0]][edge[1]]\n except KeyError:\n return None", "def add_edge(self, edge: \"Edge\", loop: bool = False, first: bool = False) -> bool:\n if self not in [edge.parent, edge.child]:\n return False\n self.edges.append(edge)\n self.add_parent(edge.parent, loop, first)\n self.add_child(edge.child, loop, first)\n return True", "def add_incident_edge(self, incident_edge):\n self.incident_edges.add(incident_edge) # append the input edge to the set of incident edges", "def add_incident_edge(self, incident_edge):\n self.incident_edges.add(incident_edge) # append the input edge to the set of incident edges", "def add_edge(self, v1, v2):\n pass # TODO", "def add_edge(self, s, e):\n self.graph[s].append(e)", "def add_edge(self, v1, v2): # O(1) time complexity\n if v1 in self.vertices and v2 in self.vertices: # check to see if v1 & v2 exists already\n self.vertices[v1].add(v2) # # add connection from v1 to v2 \n else: # else \n print(\"That vertex does not exist\")\n\n # additional options (class)\n \"\"\"\n if (v1 or v2) not in self.vertices:\n return \"vertex does exist\"\n self.vertices[v1].add(v2)\n ###\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices{v1}.add(v2)\n else:\n print(\"One of these vertices does not exist)\n \"\"\"", "def add_edge(self, name1: Any, name2: Any, weight: float = 1.0) -> None:\n if name1 in self._vertices and name2 in self._vertices:\n v1 = self._vertices[name1]\n v2 = self._vertices[name2]\n\n # Add the new edge\n v1.neighbours[v2] = weight\n v2.neighbours[v1] = weight\n else:\n # We didn't find an existing vertex for both items.\n raise ValueError", "def get_edge(self, current_id):\n if current_id is None:\n raise Exception('Edge ID can not be None')\n return self.edges.get(current_id)", "def add_edge(self, ind_node, dep_node, graph=None):\n if not graph:\n graph = self.graph\n if ind_node not in graph or dep_node not in graph:\n raise KeyError(\"one or more nodes do not exist in graph\")\n test_graph = deepcopy(graph)\n test_graph[ind_node].add(dep_node)\n is_valid, message = self.validate(test_graph)\n if is_valid:\n graph[ind_node].add(dep_node)\n else:\n raise Exception(\"FUCK YOU\")", "def add_edge(self, v1, v2):\n if v1 in self.vertices and v2 in 
self.vertices: self.vertices[v1].add(v2)\n else: raise IndexError(\"Nonexistant Vert.\")", "def add_edge(self, node):\n self._edges.append(node)", "def add_edge(self, id1: int, id2: int, weight: float) -> bool:\r\n # check if key exists in dictionary by checking if get() returned default value\r\n if self.Edges[id1].get(id2, -1) == -1:\r\n self.Edges[id1][id2]=weight\r\n self.Edges_In[id2][id1]=weight\r\n self.edgeSize += 1\r\n self.mc +=1\r\n return True\r\n return False\r\n raise NotImplementedError", "def Add(self, elAddr):\n if not self.Has(elAddr):\n edge = self.ctx.CreateEdge(ScType.EdgeDCommonConst, self.addr, elAddr)\n edge = self.ctx.CreateEdge(ScType.EdgeAccessConstPosPerm, self.relAddr, edge)\n\n return edge.IsValid()\n\n return False", "def update_edge(self, edge):\n\n if edge.uuid not in self._edges:\n error_str = \"Trying to update a non-existing edge with uuid: \"\\\n + str(edge.uuid)\n raise KeyError(error_str)\n\n if not isinstance(edge, Edge):\n error_str = \"Trying to update an object with the wrong type. \"\\\n + \"Edge expected.\"\n raise TypeError(error_str)\n\n edge_to_update = self._edges[edge.uuid]\n\n edge_to_update.data = edge.data\n edge_to_update.points = edge.points", "def edge(self) -> EdgeConfig:\n return self._edge", "def addEdge(self, vertex1, vertex2):\n self.addVertex(vertex1) \n self.addVertex(vertex2)\n\n if vertex2 not in self.adjList[vertex1]:\n self.adjList[vertex1].append(vertex2)", "def add_edge(options):\n vsm_obj = get_vsm_object(options, '4.0')\n edge = Edge(vsm_obj, '4.0')\n edge_schema = EdgeSchema(None)\n edge_schema.datacenterMoid = get_datacenter_id(options)\n edge_schema.appliances.applianceSize = 'compact'\n\n appliance_schema = ApplianceSchema()\n appliance_schema.datastoreId = get_datastore_id(options)\n appliance_schema.resourcePoolId = get_cluster_id(options)\n\n # XXX(hchilkot):\n # set default firewall rule to accept for edge,\n # this is required to pass any traffic across networks.\n result, features_schema = set_edge_features_schema(default_firewall_rule = FW_DEFAULT_RULE_ACCEPT)\n if not result:\n print(\"Result : %r. Received: %r for features schema. 
\\\n Failed to set edge features.\" % (result, features_schema))\n return False\n edge_schema.features = features_schema\n\n edge_schema.appliances.appliance = [appliance_schema]\n edge_schema.vnics = [get_vnic(options, 0)]\n edge_schema.name = get_edge_name(options)\n print (\"Creating edge %s\" % edge_schema.name)\n result = edge.create(edge_schema)\n if (result[0].response.status != 201):\n r_vars = vars(result[0])\n print(\"Create edge error: %s\" % result[0].response.reason)\n print ', '.join(\"%s: %s\" % item for item in r_vars.items())\n return False\n return True", "def addEdge(self,x,y):\r\n self.matr[x][y] = True\r\n self.matr[y][x] = True", "def add_edge(self, item1: Any, item2: Any, weight: Union[int, float]) -> None:\n if item1 in self._vertices and item2 in self._vertices:\n v1 = self._vertices[item1]\n v2 = self._vertices[item2]\n\n # Add the new edge\n v1.neighbours[v2] = weight\n v2.neighbours[v1] = weight\n else:\n # We didn't find an existing vertex for both items.\n raise ValueError", "def add_edge(self, id1: int, id2: int, weight: float) -> bool:\n if id1 in self.Nodes and id2 in self.Nodes and id2 not in self.Edges[id1]:\n if self.Edges[id1] is None:\n self.Edges[id1] = {}\n self.Edges[id1][id2] = weight\n else:\n self.Edges[id1][id2] = weight\n self.edgesize += 1\n self.MC += 1\n return True\n else:\n return False", "def addEdge(self, nVertex, pVertex=None):\n if not self.containsEdge(nVertex, pVertex):\n if type(nVertex) is edge:\n self.edges.append(nVertex)\n self.addVertex(nVertex.pvt)\n self.addVertex(nVertex.nvt)\n return nVertex\n else:\n self.addVertex(nVertex)\n self.addVertex(pVertex)\n newE = edge(nVertex, pVertex)\n self.edges.append(newE)\n return newE\n\n else:\n return self.getEdge(nVertex, pVertex)", "def add_edge(self, v1, v2):\n # First we check to see if the vertices we're trying to connect exist\n if v1 in self.vertices and v2 in self.vertices:\n # If they do exist, we add v2 as a neighbor to v1\n self.vertices[v1].add(v2)\n else:\n # If v1 or v2 does not exist, we raise an error\n raise IndexError(\"Vertex does not exist\")", "def add_Edge(self, node1, node2, weight=1):\n if node1 and node2 in self._adjacency_list:\n self._adjacency_list[node1].append(Edge(node2, weight))", "def add_edge(self, v1, v2):\n\n (x1, y1) = v1\n (x2, y2) = v2\n\n if not self.has_vertex(x1, y1) or not self.has_vertex(x2, y2): return\n if v1 not in self.get_neighbors(x2, y2): return\n\n self._reachable[v1].add(v2)\n self._reachable[v2].add(v1)", "def add_edge(\n self, subject_node: str, object_node: str, edge_key: str = None, **kwargs: Any\n ) -> None:\n if \"data\" in kwargs:\n data = kwargs[\"data\"]\n else:\n data = kwargs\n return self.graph.add_edge(subject_node, object_node, key=edge_key, **data)", "def get_edge(self, uuid):\n\n try:\n return Edge.from_edge(self._edges[uuid])\n except KeyError:\n error_str = \"Trying to get an non-existing edge with uuid: {}\"\n raise ValueError(error_str.format(uuid))", "def add_edge(self, node1, node2):\n if node1 not in self._neighbors:\n self._neighbors[node1] = []\n self._neighbors[node1].append(node2)\n if node2 not in self._neighbors:\n self._neighbors[node2] = []", "def add_edge(self, weight, attributes, first_incident_node, second_incident_node):\n # if the first incident node is not in the nodeset\n if first_incident_node.get_name() not in self.get_node_names():\n self.add_node(first_incident_node) # add the first incident node\n\n # if the second incident node is not in the nodeset\n if second_incident_node.get_name() not 
in self.get_node_names():\n self.add_node(second_incident_node) # add the second incident node\n\n edge = Edge(weight, attributes, first_incident_node, second_incident_node) # create the Edge object\n\n first_incident_node.add_incident_edge(edge) # connect the first and second incident nodes using the edge\n second_incident_node.add_incident_edge(edge)\n\n self.__check_validity() # check if graph is valid - throws exception if not\n\n return edge # return the newly added edge", "def add_edge(self, v1, v2):\n # add the 2nd node to the list of edges for the first node\n if v1 in self.vertices and v2 in self.vertices:\n\n self.vertices[v1].add(v2)", "def get_extended_by(self, edge):\n return Path(self, edge)", "def save_edge(self, edge: Union[dict, Edge]):", "def e(src, dst):\n edge = pydot.Edge(src, dst)\n graph.add_edge(edge)", "def make_edge(self, a, b):\n try: e = self.G.new_edge(a, b)\n except: return self.G.new_edge(a,b)\n\n try: self.G.set_edge_attribute(e, \"arrow\", \"true\")\n except: return self.G.new_edge(a,b)\n\n try: self.G.set_edge_attribute(e, \"spline\", \"false\")\n except: return self.G.new_edge(a,b)\n return e", "def addEdge(source, target):\n\n\t\t# append the edge which contain source and target to the graph defaultdict\n\t\tgraph[source].append(target)\n\n\t\t\"\"\"initialize reference dictionary for each node\"\"\"\n\n\t\t# append the source as key and 0 as value to the reference dictionary\n\t\treference[source] = [0, 0, False, False]", "def add_edge(self, vertex, sequence):\n \n #get available edge that these two words have been connected.\n edge = self._get_available_edge(vertex)\n \n\n if edge is None: # if not exisst, create a new one and append into edges\n edge = Edge(vertex, sequence)\n self.edges.append(edge)\n return\n\n #otherwise need to increase weight, and set the sequence\n edge.increase_weight()\n edge.add_sequence(sequence)", "def add_edge(self, tail, head):\n\n if tail not in self._vertices or head not in self._vertices:\n raise RuntimeError(\"Destination or source of edge ('{}'\".format(head) +\n \",'{}'\".format(tail) + \") cannot be found as a vertex\")\n else:\n self._vertices[tail].add_edge(self._vertices[head])\n self._vertices[head].increase_indegree()", "def copy_edge(self,\n graph,\n edge,\n new_src=None,\n new_src_conn=None,\n new_dst=None,\n new_dst_conn=None,\n new_data=None,\n remove_old=False):\n data = new_data if new_data else dcpy(edge.data)\n src = edge.src if new_src is None else new_src\n src_conn = edge.src_conn if new_src is None else new_src_conn\n dst = edge.dst if new_dst is None else new_dst\n dst_conn = edge.dst_conn if new_dst is None else new_dst_conn\n\n ret = graph.add_edge(src, src_conn, dst, dst_conn, data)\n\n if remove_old:\n graph.remove_edge(edge)\n return ret", "def add_edge(self, id1: int, id2: int, weight: float) -> bool:\n\n if id1 not in self.nodes or id2 not in self.nodes or weight<0 or id1==id2:\n return False\n try:\n self.edges_out[id1][id2]\n return False\n except:\n e = Edge(id1, id2, weight)\n self.edges_out[id1].update({id2: e})\n self.edges_in[id2].update({id1: e})\n self.edges_on_graph += 1\n self.mode_changes += 1\n self.get_nodes(id1).out_e += 1\n self.get_nodes(id2).in_e += 1\n return True", "def add_edge(self, e, k):\n assert len(self.e2k) == self.VEK[1] - 1\n assert len(self.k2e) == self.VEK[1] - 1\n v1, v2 = self.grid[1:, k]\n assert self.components[v1] != self.components[v2]\n self.k2e[k] = e\n self.e2k[e] = k\n self.neighbors[v1].add(v2)\n self.neighbors[v2].add(v1)\n self.components[:] = 
False\n assert len(self.e2k) == self.VEK[1]\n assert len(self.k2e) == self.VEK[1]", "def parse_edge(self, stmt):\r\n if stmt['label'] in self._names:\r\n raise ValueError('There is already a value with name {}'.format(stmt['label']))\r\n edge = Edge(label=stmt['label'],\r\n primary_key=stmt.get('primary_key', None),\r\n functional=stmt.get('functional', False))\r\n self._names += [edge.label]\r\n return edge", "def add_edge(self, n1, n2, weight=0):\n self.add_node(n1)\n self.add_node(n2)\n if n2 in self.node_dict[n1]:\n raise ValueError(\"Edge already exists\")\n self.node_dict[n1][n2] = weight", "def add_edge(self, v1, v2):\n pass # TODO\n # both vertices have to exist to make connection(e.g. directed edge)\n\n if v1 in self.vertices and v2 in self.vertices:\n # print(f' type(vertices) is {type(self.vertices)}')\n self.vertices[v1].add(v2) # using set .add() method to append\n else:\n # print(f'ERROR: vertex {v1} or {v2} does not exist') \n raise ValueError(\"Vertex not yet created\")\n # print(f'ERROR: vertex {v1} or {v2} does not exist')\n\n #### not quite\n # try:\n # if v1 in self.vertices or v2 in self.vertices:\n # self.vertices[v1].add(v2)\n # except:\n # raise ValueError(\" BAD VERTEX !!\")\n\n\n if v1 not in self.vertices or v2 not in self.vertices:\n raise ValueError(\" BAD VERTEX !!\")\n else:\n self.vertices[v1].add(v2)", "def add_edge(self, val1, val2):\n self.setdefault(val1, [])\n self.setdefault(val2, [])\n if val2 not in self[val1]:\n self[val1].append(val2)", "def edge(self, edge: EdgeConfig):\n\n self._edge = edge" ]
[ "0.7570172", "0.7543887", "0.7385329", "0.731582", "0.7228451", "0.72140056", "0.7205442", "0.7191608", "0.7144214", "0.712575", "0.7108078", "0.7094593", "0.7083264", "0.6992474", "0.6966737", "0.6870384", "0.6825222", "0.6818147", "0.67740756", "0.67177004", "0.6700951", "0.6675162", "0.66404897", "0.66286594", "0.6616903", "0.65793836", "0.6552262", "0.64829767", "0.64267427", "0.64159346", "0.64044535", "0.6384969", "0.63780624", "0.6374572", "0.6364583", "0.6360862", "0.6357906", "0.634451", "0.6323939", "0.63098705", "0.63088745", "0.62993383", "0.6298968", "0.6271049", "0.6265432", "0.625542", "0.6254019", "0.62503284", "0.62437916", "0.62186044", "0.6216261", "0.6209383", "0.6202878", "0.6198083", "0.6195734", "0.6193575", "0.6184998", "0.613493", "0.613493", "0.6111865", "0.61010194", "0.60836774", "0.6056771", "0.60559946", "0.60515076", "0.6046278", "0.60390866", "0.6003792", "0.5997389", "0.5992186", "0.5980445", "0.59638965", "0.5960242", "0.5958419", "0.5956395", "0.5945734", "0.59452826", "0.5906775", "0.58759296", "0.5872626", "0.5867257", "0.58669484", "0.5862176", "0.58605427", "0.58354723", "0.58306795", "0.5807541", "0.58024085", "0.58004135", "0.57988065", "0.57946986", "0.57934284", "0.57927626", "0.5785994", "0.5775217", "0.57714736", "0.5763379", "0.5740815", "0.57378876", "0.571951" ]
0.81017184
0
Add vertex to DCEL if it doesn't already exist, otherwise return the existing vertex.
def add_vertex(self, vertex):
    try:
        vertex_idx = self.vertices.index(vertex)
        # print "{} already in {}".format(vertex, self.vertices)
        return self.vertices[vertex_idx]
    except Exception:
        self.vertices.append(vertex)
        # print "adding {} to {}".format(vertex, self.vertices)
        return vertex
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_vertex(self, vertex):\n if vertex.id not in self.vertices.keys():\n self.vertices[vertex.id] = vertex", "def _add_vertex(self, x, y):\n v = Vertex2(x, y)\n i = bisect(self.vertices, v)\n \n # if vertex at these coordinates exists just return it\n if len(self.vertices) > i and self.vertices[i] == v:\n return self.vertices[i]\n \n # otherwise add new vertex in sorted position and return it\n self.vertices.insert(i, v)\n return v", "def add_vertex(self, v):\n v = {'x': v[0], 'y': v[1]}\n if v not in self:\n self.append(v)\n return len(self)-1\n return self.index(v)", "def add_vertex(self, vertex):\n if vertex not in self.graph_dict:\n self.graph_dict[vertex] = []\n return vertex", "def add_vertex(self, vertex):\n if self.contains(vertex):\n return None\n if self.is_weighted():\n self._graph[vertex] = dict()\n else:\n self._graph[vertex] = set()\n return True", "def add_vertex(self, vertex_id): # O(1) time complexity\n self.vertices[vertex_id] = set() \n\n # additional options (class)\n '''\n if vertex_id not in self.vertices:\n self.vertices[vertex_id] = {}\n\n else:\n return \"Vertex is already in Graph\"\n '''", "def add_vertex(self, key):\n self.vertCount += 1\n addedVertex = vertex.Vertex(key)\n self.vertList[key] = addedVertex\n return addedVertex", "def add_vertex(self, vertex):\n raise NotImplementedError", "def add_vertex(self, vertex_id):\n pass # TODO", "def add_vertex(self, key):\n #increments the number of vertices\n #creates a new vertex\n #adds the new vertex to the vertex list\n #returns the new vertex\n if key != None:\n self.num_vertices += 1\n new_vertex = Vertex(key)\n self.vert_list[key] = new_vertex\n return new_vertex\n raise KeyError(\"There's no key here\")", "def addVertex(self, key):\n if key not in self.vertList:\n self.numVertices += 1\n vtx = Vertex(key)\n self.verList[key] = vtx\n return vtx", "def add_vertex(self, vertex):\n if vertex not in self.__graph_dict:\n self.__graph_dict[vertex] = []", "def add_vertex(self, vertex):\n if vertex not in self.__graph_dict:\n self.__graph_dict[vertex] = []", "def add_vertex(self, key):\n vertex = Vertex(key)\n self.vertices += 1\n self.graph[key] = vertex\n\n return vertex", "def add_vertex(self, vertex):\r\n if vertex not in self.__graph_dict:\r\n self.__graph_dict[vertex] = {}", "def add_vertex(self, key):\n if key in self.vertices:\n raise ValueError('Key is already in use')\n \n # Create vertex\n self.vertices[key] = GraphVertex(key=key)", "def add_vertex(self, vertex):\n if vertex not in self.graph_dict:\n self.graph_dict[vertex] = []", "def add_vertex(self, vertex):\r\n if self.is_vertex_in_graph(vertex):\r\n raise GraphException(\"The vertex already exists.\")\r\n self.__neighbours[vertex] = []", "def add_vertex(self,vertex):\n if vertex not in self.__graph_dict:\n self.__graph_dict[vertex] = []\n # logging.debug(\"vertex being initialized ..\", vertex)\n else:\n # logging.debug(\"vertex not added ..\", vertex)\n pass", "def add_vertex(self, key):\n # increment the number of vertices\n self.num_vertices += 1\n # create a new vertex\n vertex = Vertex(key)\n # add the new vertex to the vertex dictionary with a list as the value\n # self.vert_dict[vertex] = []\n # add the new vertex to the vertex list\n self.vert_dict[key] = vertex\n # return the new vertex\n return vertex", "def __add__(self, vertex):\n\n if isinstance(vertex, Vertex):\n vName = vertex.name\n self._vertices[vName] = vertex", "def add_vertex(self, vertex_id):\n if vertex_id not in self.vertices:\n self.vertices[vertex_id] = set()", "def 
add_vertex(self, vertex_id):\n if vertex_id not in self.vertices: self.vertices[vertex_id] = set()", "def addVertex(self, v: Vertex):\n if v is not None:\n self._vertices.add(v)\n\n # Possibly need to recalculate genus/core/etc.\n self.invalidateCaches()", "def add_vertex(self, key):\n vertex = Vertex(key)\n self.vertices[key] = vertex", "def add_vertex(self, vertex_id):\n # creates a vertex with an empty list as their neighboring vertices\n self.vertices[vertex_id] = set()", "def add_vertex(self, vertex_id):\n # just add new dict entry\n self.vertices[vertex_id] = set()\n\n pass # TODO", "def add_vertex(self, v: str) -> None:\n if self.contains_vertex(v):\n return\n else:\n self.adj_list[v] = []", "def add_vertex(self, vertex: Vertex) -> None:\n self._vertices.add(vertex)\n if not vertex.predicate:\n self._entities.add(vertex)", "def add_vertex(self, label=None, properties=None, current_id=None):\n if current_id is None:\n done = False\n while not done:\n next_id = self.get_next_id()\n\n if next_id not in self.vertices:\n current_id = next_id\n done = True\n else:\n if current_id in self.vertices:\n raise Exception('Vertex with ID Already Exist')\n\n current_vertex = Vertex(self, current_id, label=label, properties=properties)\n self.vertices[current_vertex.id] = current_vertex\n return current_vertex", "def add_vertex(self, vertex_id):\n # add new vertex in vertices\n self.vertices[vertex_id] = set()\n\n # increment len\n self.len += 1", "def add_vertex(self, vertex_name: n):\n new_vertex = Vertex(vertex_name)\n self._graph[new_vertex.name] = new_vertex", "def add_vertex(self, vertex):\n if isinstance(vertex, Vertex):\n self.vertices.append(vertex)\n return\n raise TypeError('Is not vertex instance!')", "def add_vertex(self, label):\n\n if label in self._vertices:\n raise RuntimeError(\"vertex = '{}'\".format(label) + \n \" is already a vertex in this directed graph\")\n self._vertices[label] = Vertex(label)", "def add_vertex(self, vertex_id):\n self.vertices[vertex_id] = set()", "def add_vertex(self, vertex_id):\n self.vertices[vertex_id] = set()", "def add_vertex(self, vertex_id):\n self.vertices[vertex_id] = set()", "def add_vertex(self, vertex_id):\n self.vertices[vertex_id] = set()", "def add_vertex(self):\n self.visited_node += [False]\n self.V = self.V + 1\n self.adjacency_list.append(list())", "def add_vertex(self, vertex_id):\n self.vertices[vertex_id] = set() # set of edges from this vert\n # a set is like a list except it allows O(1) lookups like a hashtable and it doesn't allow duplicates", "def add_vertex(self, key: str, data=None):\n if key in self._vertex_map:\n self._vertex_map[key].data = data\n else:\n v = Vertex(key, data)\n self._vertex_map[key] = v", "def addVertex(self, v):\r\n self.adjacent.setdefault(v, list())", "def add_vertex(self, vertex):\n self.vertices.append(vertex)\n self.vertex_edge[vertex] = []", "def add_vertex(self, vertex):\n self[vertex] = {}", "def addVertex(self, vertexNumber):\n try:\n self.vertexIndex[vertexNumber]\n raise VertexError(vertexNumber, ErrorMessages.vertexAlreadyExists)\n except KeyError:\n self.vertexIndex[vertexNumber] = Vertex(vertexNumber)\n return", "def add_vertex(self, value):\n vertex = Vertex(value)\n self._adjacency_list[vertex.value]= []\n return vertex", "def add_vertex(self, item: Any, kind: str) -> None:\n if item not in self._vertices:\n self._vertices[item] = _Vertex(item, kind)", "def create_vertex(c, gene_id, genome_build, chromosome, pos, vertices):\n # Check if the vertex exists. 
If yes, add current gene ID to it\n query = \",\".join([genome_build, chromosome, str(pos)])\n if query in vertices.keys():\n vertices[query][-1].add(gene_id)\n existing_vertex_id = vertices[query][0]\n return existing_vertex_id, vertices\n\n # In the case of no match, create the edge\n # Get ID number from counter\n vertex_id = vertices[\"counter\"] + 1\n vertices[\"counter\"] += 1\n genes = set()\n genes.add(gene_id)\n new_vertex = [vertex_id, genome_build, chromosome, pos, genes]\n keyname = \",\".join([genome_build, chromosome, str(pos)])\n vertices[keyname] = new_vertex\n\n return vertex_id, vertices", "def addVertex(self, vertex):\n if vertex not in self.adjList:\n self.adjList[vertex] = []", "def add_edge(self, v1, v2): # O(1) time complexity\n if v1 in self.vertices and v2 in self.vertices: # check to see if v1 & v2 exists already\n self.vertices[v1].add(v2) # # add connection from v1 to v2 \n else: # else \n print(\"That vertex does not exist\")\n\n # additional options (class)\n \"\"\"\n if (v1 or v2) not in self.vertices:\n return \"vertex does exist\"\n self.vertices[v1].add(v2)\n ###\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices{v1}.add(v2)\n else:\n print(\"One of these vertices does not exist)\n \"\"\"", "def add_vertex(self, u, val):\n raise NotImplementedError()", "def addVertex(self, v, n, startIndex, apex=False):\n for vv in self._sharedVertices[-1::-1]:\n if self.verticesEqual(v, vv):\n break\n else:\n self._sharedVertices.append(v)\n if not apex:\n # blend with the previous patch?\n startIndex = min(startIndex, self._prevPatchStartIndex)\n i = self._nVertices - 1\n while i >= startIndex:\n if self.verticesEqual(self._vertices[i], v):\n # vertex found\n # sum its normal with the duplicate vertex's normal\n nn = QVector3D(*self._normals[i])\n nn += QVector3D(*n)\n nn.normalize()\n self._normals[i] = [nn.x(), nn.y(), nn.z()]\n return i\n i -= 1\n # vertex not found or it's an apex vertex\n self._vertices.append(v)\n self._normals.append(n)\n self._nVertices += 1\n return self._nVertices - 1", "def addVertex(self, arg1, arg2=0, arg3=0):\n if type(arg1) is tuple:\n x, y, z = arg1\n elif type(arg1) is vector:\n x, y, z = arg1.coords()\n elif type(arg1) is float or type(arg1) is int:\n x, y, z = arg1, arg2, arg3\n elif type(arg1) is vertex:\n try:\n newV = self.getVertex(arg1.x, arg1.y, arg1.z, COMPARISON_EPSILON)\n except ValueError:\n newV = arg1\n self.vertices.append(arg1)\n return newV\n else:\n raise ValueError('bad argument type: ' + str(type(arg1)))\n try:\n newV = self.getVertex(x, y, z, COMPARISON_EPSILON)\n except ValueError:\n newV = vertex(x, y, z)\n self.vertices.append(newV)\n return newV", "def add_vertex(self, data):\n\n if self.__validate_node_data(data):\n Grapheap.count_nodes += 1\n node = Node(\n Grapheap.count_nodes,\n data,\n self.optimisation_keys)\n\n return node\n\n else:\n # _validate_node_data will return True or raise exception\n pass", "def insert_vertex(self,x=None):\n v = self.Vertex(x)\n self._outgoing[v] = {}\n if self.is_directed():\n self._incoming[v] = {} # need distinct map for incoming edges\n return v", "def add(self, key, data):\n if key not in self.vertices:\n self.numberOfVertices += 1\n self.vertices[key] = Vertex(key, data)\n return True\n\n return False", "def add_vertex(self, v, i=None):\n if len(self) < 2:\n return LineString.add_vertex(self, v, i)", "def get_vertex(self, name):\n return self.vertices[name]", "def get_vertex(self, key):\n\n vertex = None\n try: \n vertex = self.graph[key]\n except KeyError:\n 
raise ValueError(\"Vertex with key {} not in Graph\".format(key))\n\n return vertex", "def add_vertex(self, v):\n self[v] = {}", "def add_vertex(self, v):\n self[v] = {}", "def get_vertex(self, vertex_name: n) -> Union[Vertex, None]:\n try:\n return self._graph[vertex_name]\n except KeyError:\n return None", "def add_vertex(self, node, channel=0):\n \n self.num_vertices = self.num_vertices + 1\n new_vertex = Vertex(node, channel)\n self.vert_dict[node] = new_vertex\n return new_vertex", "def get_vertex(self, key):\n if key in self.vertList:\n return self.vertList[key]\n else:\n return None", "def get_vertex(self, vertex):\n # return the vertex if it is in the graph\n if vertex in self.vert_dict:\n return self.vert_dict[vertex]\n else:\n raise ValueError('Vertex not in graph')", "def get_vertex(self, n):\n #returns the vertex if it is in the graph\n if self.vert_list[n] != None:\n return self.vert_list[n]\n else:\n raise KeyError(\"It would appear the vertex you are searching for does not exist\")", "def add_vertex(self, vertex_name, vertex_type=None):\n if vertex_name not in self._vertex_dict:\n self._labels.InsertNextValue(vertex_name)\n self._vertex_dict[vertex_name] = self.vertex_tuple(Vertex(vertex_name, vertex_type),\n self._graph.AddVertex())\n if vertex_type not in self._color_dict:\n self._color_dict[vertex_type] = self._vertex_types\n self._vertex_types += 1\n self._colors.append(self._color_dict[vertex_type])", "def add(self, vertex):\n if not self.first:\n self.first = vertex\n self.first.next = vertex\n self.first.prev = vertex\n else:\n next = self.first\n prev = next.prev\n next.prev = vertex\n vertex.next = next\n vertex.prev = prev\n prev.next = vertex", "def get_vertex(self, current_id):\n if current_id is None:\n raise Exception('Vertex ID can not be None')\n return self.vertices.get(current_id)", "def add_vertex(self, event: matplotlib.backend_bases.LocationEvent) \\\n -> None:\n\n transform_func = event.inaxes.transData.inverted().transform\n new_coords = transform_func((event.x, event.y))\n log.info(f'Adding Vertex @ {event.x} - {event.y}, axis: '\n f'{new_coords}')\n graph = self._get_connected_graph(event.inaxes)\n vertex = Vertex()\n vertex.attr['x'] = float(new_coords[0])\n vertex.attr['y'] = float(new_coords[1])\n graph.add(vertex)\n self._redraw_graph()", "def add_vertex(self, name: Any, url: str) -> None:\n if name not in self._vertices:\n self._vertices[name] = _WeightedVertex(name, url)", "def get_vertex(self, key):\n return self.vertices[key]", "def add_edge(self, v1, v2):\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices[v1].add(v2)\n else:\n raise IndexError(\"That vertex does not exist!\")", "def add_edge(self, v1, v2):\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices[v1].add(v2)\n else:\n raise IndexError('That vertex does not exist')", "def add_vertex(self, room):\r\n if room['room_id'] not in self.rooms:\r\n self.rooms[room['room_id']] = room\r\n # self.rooms[room['room_id']]['exits'] = {\r\n # d: '?' 
for d in room['exits']}\r", "def agregar_vertice(self, v):\n if not v in self.vertices.keys():\n self.vertices[v] = {}", "def getVertex(self, key):\n return self.vertList[key]", "def addVertex(self,x):\n self.dictOut[x]=[]", "def add_vertex(self, vertex_id, dirs):\n self.vertices[vertex_id] = {i: \"?\" for i in dirs}", "def add_protein( self, protein ):\n v = get_vertex( protein )\n if v: return v # already added", "def get_vertex(self, label):\n\n return self._vertices[label]", "def adicionaVertice(self, vertice):\r\n self.vertices.append(vertice)", "def addEdge(self, nVertex, pVertex=None):\n if not self.containsEdge(nVertex, pVertex):\n if type(nVertex) is edge:\n self.edges.append(nVertex)\n self.addVertex(nVertex.pvt)\n self.addVertex(nVertex.nvt)\n return nVertex\n else:\n self.addVertex(nVertex)\n self.addVertex(pVertex)\n newE = edge(nVertex, pVertex)\n self.edges.append(newE)\n return newE\n\n else:\n return self.getEdge(nVertex, pVertex)", "def add_edge(self, v1, v2):\n pass # TODO\n # both vertices have to exist to make connection(e.g. directed edge)\n\n if v1 in self.vertices and v2 in self.vertices:\n # print(f' type(vertices) is {type(self.vertices)}')\n self.vertices[v1].add(v2) # using set .add() method to append\n else:\n # print(f'ERROR: vertex {v1} or {v2} does not exist') \n raise ValueError(\"Vertex not yet created\")\n # print(f'ERROR: vertex {v1} or {v2} does not exist')\n\n #### not quite\n # try:\n # if v1 in self.vertices or v2 in self.vertices:\n # self.vertices[v1].add(v2)\n # except:\n # raise ValueError(\" BAD VERTEX !!\")\n\n\n if v1 not in self.vertices or v2 not in self.vertices:\n raise ValueError(\" BAD VERTEX !!\")\n else:\n self.vertices[v1].add(v2)", "def newVertex(self, inter):\n try:\n if inter.nV is None:\n raise AttributeError\n return inter.nV\n except AttributeError:\n vert = inter.v if type(inter) is intersector else inter\n inter.nV = self.result.addVertex(vert.x, vert.y, vert.z)\n assert inter.nV is not None\n return inter.nV", "def add_vertex(self, x, y):\n\n if not isinstance(x, int) and not isinstance(x, float):\n raise TypeError(\"x must be numeric, not '%s'\" % x)\n if not isinstance(y, int) and not isinstance(y, float):\n raise TypeError(\"y must be numeric, not '%s'\" % y)\n self._coordinates.append(x)\n self._coordinates.append(y)", "def add_vertex(self, x, y):\n\n if not isinstance(x, int) and not isinstance(x, float):\n raise TypeError(\"x must be numeric, not '%s'\" % x)\n if not isinstance(y, int) and not isinstance(y, float):\n raise TypeError(\"y must be numeric, not '%s'\" % y)\n self._coordinates.append(x)\n self._coordinates.append(y)", "def add_edge(self, v1, v2):\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices[v1].edges.add(v2)\n self.vertices[v2].edges.add(v1)\n else:\n raise IndexError(\"That vertex does not exist!\")", "def add_edge(self, v1, v2):\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices[v1].add(v2)\n else:\n raise IndexError('nonexistent vertex/node')", "def getVertex(self, index):\n Parameter.checkIndex(index, 0, len(self.V))\n return self.V[index]", "def addVertex(self, v, vertices):\n vn = v / np.linalg.norm(v) * self._radius\n vertices += [[vn[0], vn[1], vn[2]]]\n return len(vertices)-1", "def add_directed_edge(self, v1, v2):\n if v1 in self.vertices:\n self.vertices[v1].edges.add(v2)\n else:\n raise IndexError(\"That vertex does not exist!\")", "def add_edge(self, v1, v2):\n if v1 in self.vertices and v2 in self.vertices: self.vertices[v1].add(v2)\n else: raise 
IndexError(\"Nonexistant Vert.\")", "def append_vertex(remote, objectid, position, normal=(0,1,0), color=(1,1,1) ):\n cmd1 = mmapi.StoredCommands()\n key1 = cmd1.AppendSceneCommand_AppendVertex(objectid, to_vec3f(position), to_vec3f(normal), to_vec3f(color) )\n remote.runCommand(cmd1)\n result_val = mmapi.any_result()\n cmd1.GetSceneCommandResult_AppendVertex(key1, result_val)\n return result_val.i", "def add_edge(self, v1, v2):\n # Check if they exist\n # if v1 in self.vertices and v2 in self.vertices:\n if v1 in self.vertices:\n # Add the edge\n self.vertices[v1].add(v2)\n else:\n print(f\"ERROR ADDING EDGE between {v1} and {v2} : Vertex not found\")", "def get_vertex(self, crs=None):\n if crs is None or crs==self.crs:\n return self.vertex\n else:\n return _reproject((self.x, self.y), self.crs, crs)", "def add_edge(self, v1, v2):\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices[v1].add(v2)\n else:\n print(\"ERROR ADDING EDGE: Vrtes not found\")", "def selectVertex(self, addToSelection: bool) -> None:\n ...", "def __getitem__(self, key):\n\n aVertex = self._vertices[key]\n return aVertex", "def get_vertex(self, n):\n \n if n in self.vert_dict:\n return self.vert_dict[n]\n else:\n return None" ]
[ "0.7698422", "0.75747514", "0.7562474", "0.75239784", "0.7504211", "0.7403369", "0.7356343", "0.7279313", "0.71772546", "0.7124853", "0.70904213", "0.7066278", "0.7066278", "0.7043618", "0.70346177", "0.7016917", "0.69948727", "0.6946342", "0.6899464", "0.6882716", "0.6859618", "0.68538505", "0.6830724", "0.6830189", "0.6787582", "0.6782125", "0.676277", "0.6751066", "0.6738139", "0.6733741", "0.6718132", "0.67168397", "0.6702167", "0.6677883", "0.6658493", "0.6658493", "0.6658493", "0.6658493", "0.6599901", "0.65994805", "0.6564733", "0.6538001", "0.6531033", "0.65276", "0.6507024", "0.6487282", "0.6456442", "0.6453262", "0.6448038", "0.6431041", "0.64239126", "0.6422452", "0.63716173", "0.63466084", "0.63181823", "0.6318113", "0.63157743", "0.62793434", "0.6273907", "0.6269034", "0.6269034", "0.6261513", "0.6257612", "0.62399036", "0.62108034", "0.6195283", "0.61871445", "0.6170388", "0.6165319", "0.61536866", "0.6149769", "0.6137836", "0.61170155", "0.60877484", "0.6050592", "0.5986259", "0.59537005", "0.5946035", "0.59344405", "0.59030515", "0.5889558", "0.58771867", "0.58423764", "0.58256966", "0.5802495", "0.5789255", "0.5789255", "0.5787585", "0.5776968", "0.5767778", "0.57176137", "0.57162637", "0.56972814", "0.56886643", "0.5688628", "0.56785405", "0.5673432", "0.5636896", "0.5613418", "0.5600597" ]
0.8166046
0
Construct a DCEL object.
def __init__(self, vertices=None, edges=None, faces=None):
    super(DCEL, self).__init__()
    self.vertices = vertices or []
    self.edges = edges or []
    self.faces = faces or []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build(*args, **kwargs):\n\n\treturn Excel(*args, **kwargs)", "def __init__(self, name=None, compile_paths=combined_path):\n Hay2011Cell.__init__(self, name=name, compile_paths=compile_paths)", "def new( self, d, x, y, dx, n='temp.grd', nd=-999.0):\n self.data = d\n self.name = n\n self.ncols = self.data.shape[1]\n self.nrows = self.data.shape[0]\n self.xllcorner = x\n self.yllcorner = y\n self.cellsize = dx\n self.nodata = nd", "def __new__(cls, wfr_diameter=76.2, edge_clearance=2.0, x0=0.0, y0=0.0, width=0.0, height=0.0, filename=None):\n if filename is not None:\n with open(filename,'rb') as f:\n inst = pickle.load(f)\n if not isinstance(inst, cls):\n raise TypeError('Unpickled object is not of type {}'.format(cls))\n else:\n inst = super(cellDistribution, cls).__new__(cls)\n\n return inst", "def _constructor(self):\r\n return SpatialDataFrame", "def __init__(self,name=None,zcmb=None,zcmb_err=None,\n ra=None,dec=None, mjd=None,\n type_=None,cosmo=None,\n load_from=None,empty=False,sfd98_dir=None,\n **kwargs):\n self.__build__()\n \n if empty:\n return\n \n if load_from is not None:\n self.load(load_from,**kwargs)\n return\n\n self.define(name, zcmb,ra,dec,mjd=mjd,\n cosmo=cosmo,type_=type_,sfd98_dir=sfd98_dir,\n **kwargs)\n return", "def __init__(self, dset, grid=None):\n self.grid = xgcm.Grid(dset) if grid is None else grid\n self.coords = dset.coords.to_dataset().reset_coords()\n self.dset = dset\n self.terms = None\n \n # self.dset = dset.reset_coords(drop=True)\n # self.volume = dset.drF * dset.hFacC * dset.rA\n \n self.BCx = 'periodic' if self.grid.axes['X']._periodic else 'fill'\n self.BCy = 'periodic' if self.grid.axes['Y']._periodic else 'fill'", "def __init__(self, *args, **kwargs):\n if len(args) > 0: self._init_from_ascii(*args)\n if len(kwargs) > 0: self._init_from_keywords(**kwargs)\n discdate = ephem.Date(self.jd - 2415020.0)\n self.ra_str = str(ephem.hours(self.ra * ephem.pi/180))\n if self.ra < 150.0:\n self.ra_str = '0' + self.ra_str\n self.dec_str = str(ephem.degrees(self.dec * ephem.pi/180))\n if abs(self.dec) < 10.0:\n self.dec_str = self.dec_str[0] + '0' + self.dec_str[1:]\n if self.dec > 0:\n self.dec_str = '+' + self.dec_str\n self.date_str = discdate.datetime().strftime(\"%d %b %Y\")\n self.name = \"CSS{}:{}{}\".format(\n discdate.datetime().strftime(\"%y%m%d\"),\n self.ra_str.replace(':','')[:6],\n self.dec_str.replace(':','')[:7])", "def __init__(self, exciton_obj=None, dipole_term=False):\n self.exciton_obj = exciton_obj\n self.file_storage = exciton_obj.file_storage\n self.k_grid = exciton_obj.k_grid\n self.a1, self.a2 = exciton_obj.a1, exciton_obj.a2\n self.n_spins = exciton_obj.n_spins\n self.n_orbs = exciton_obj.n_orbs\n self.use_dipole_term = dipole_term", "def __init__(self, cell_type, explicit_values,\n derived_values=None, non_column_labels=None, align='',\n sort_values=True):\n self.type = cell_type\n self.align = align\n self.col_index = 0 # Is set afterward\n self.values = []\n if non_column_labels:\n self.non_column_labels = [\n template_helpers.EZTItem(value=v, is_derived=ezt.boolean(d))\n for v, d in non_column_labels]\n else:\n self.non_column_labels = []\n\n for v in (sorted(explicit_values) if sort_values else explicit_values):\n self.values.append(CellItem(v))\n\n if derived_values:\n for v in (sorted(derived_values) if sort_values else derived_values):\n self.values.append(CellItem(v, is_derived=True))", "def create_object(self, confM2R, grd_filename):\n ds = xr.open_dataset(grd_filename)\n\n if self.type == 'FORCINGDATA':\n\n 
logging.info(\"[M2R_grd] ---> Assuming {} grid type for {}\".format(confM2R.grd_type, self.type))\n logging.info(\"[M2R_grd] ---> Using dimension names {} and {} and {}\".format(confM2R.lon_name,\n confM2R.lat_name,\n confM2R.depth_name))\n\n self.lon = ds[str(confM2R.lon_name)][:]\n self.lat = ds[str(confM2R.lat_name)][:]\n self.h = ds[str(confM2R.depth_name)][:]\n self.nlevels = len(self.h)\n self.fillval = -9.99e+33\n self.hc = None\n\n if self.lon.ndim == 1:\n self.lon, self.lat = np.meshgrid(self.lon, self.lat)\n\n # Create grid for ESMF interpolation\n\n self.esmfgrid = ESMF.Grid(filename=grd_filename, filetype=ESMF.FileFormat.GRIDSPEC,\n is_sphere=True, coord_names=[str(confM2R.lon_name), str(confM2R.lat_name)],\n add_mask=False)\n self.esmfgrid_u = ESMF.Grid(filename=grd_filename, filetype=ESMF.FileFormat.GRIDSPEC,\n is_sphere=True,\n coord_names=[str(confM2R.lon_name_u), str(confM2R.lat_name_u)],\n add_mask=False)\n self.esmfgrid_v = ESMF.Grid(filename=grd_filename, filetype=ESMF.FileFormat.GRIDSPEC,\n is_sphere=True,\n coord_names=[str(confM2R.lon_name_v), str(confM2R.lat_name_v)],\n add_mask=False)\n\n if confM2R.ocean_indata_type == 'SODA3':\n self.fillval = -1.e+20\n if confM2R.ocean_indata_type == 'SODA3_5DAY':\n self.fillval = -1.e+20\n if confM2R.ocean_indata_type == 'GLORYS':\n self.fillval = 9.96921e+36\n\n if confM2R.ocean_indata_type == 'NORESM':\n # self.h = ds[\"depth\"][:]\n self.h = np.asarray([0, 5, 10, 15, 20, 25, 30, 40, 50, 62.5, 75, 87.5, 100, 112.5, 125,\n 137.5, 150, 175, 200, 225, 250, 275, 300, 350, 400, 450, 500, 550, 600,\n 650, 700, 750, 800, 850, 900, 950, 1000, 1050, 1100, 1150, 1200, 1250,\n 1300, 1350, 1400, 1450, 1500, 1625, 1750, 1875, 2000, 2250, 2500, 2750,\n 3000, 3250, 3500, 3750, 4000, 4250, 4500, 4750, 5000, 5250, 5500, 5750,\n 6000, 6250, 6500, 6750])\n self.fillval = 32768\n self.nlevels = len(self.h)\n\n IOverticalGrid.get_z_levels(self)\n\n if self.type == 'STATION':\n self.lon = ds[confM2R.lon_name][:]\n self.lat = ds[confM2R.lat_name][:]\n self.h = ds[confM2R.depth_name][:]\n self.time = ds[confM2R.time_name][:]\n\n self.Lp = 1\n self.Mp = 1\n self.fillval = -9.99e+33\n\n if self.type in ['ROMS']:\n\n self.write_clim = True\n self.write_bry = True\n self.write_init = True\n self.write_stations = False\n\n self.lonname = 'lon_rho'\n self.latname = 'lat_rho'\n\n \"\"\"\n Set initTime to 1 if you dont want the first time-step to be\n the initial field (no ubar and vbar if time=0)\n \"\"\"\n\n self.inittime = 0\n self.ocean_time = 0\n self.NT = 2\n self.tracer = self.NT\n\n self.message = None # Used to store the date for printing to screen (IOwrite.py)\n self.time = 0\n self.reftime = 0\n self.grdtype = 'regular'\n self.mask_rho = ds[\"mask_rho\"][:, :]\n self.lon_rho = ds[\"lon_rho\"][:, :]\n self.lat_rho = ds[\"lat_rho\"][:, :]\n self.h = ds[\"h\"][:, :]\n\n masked_h = np.where(self.h > 0, self.h, self.h.max())\n\n self.hmin = masked_h.min()\n if \"Vtransform\" in ds.variables:\n self.vtransform = ds[\"Vtransform\"].values\n else:\n self.vtransform = confM2R.vtransform\n\n if \"s_rho\" in ds.variables:\n self.s_rho = ds[\"s_rho\"].values\n self.nlevels = len(self.s_rho)\n else:\n self.nlevels = confM2R.nlevels\n\n if \"Vstretching\" in ds.variables:\n self.vstretching = ds[\"Vstretching\"].values\n if \"theta_s\" in ds.variables:\n self.theta_s = ds[\"theta_s\"].values\n else:\n self.theta_s = confM2R.theta_s\n if \"theta_b\" in ds.variables:\n self.theta_b = ds[\"theta_b\"].values\n else:\n self.theta_b = confM2R.theta_b\n if \"Tcline\" 
in ds.variables:\n self.tcline = ds[\"Tcline\"].values\n else:\n self.tcline = confM2R.tcline\n if \"hc\" in ds.variables:\n self.hc = ds[\"hc\"].values\n else:\n self.hc = confM2R.hc\n\n if self.vtransform == 1:\n self.hc = min(self.hmin, self.tcline)\n self.hc = self.tcline\n if self.tcline > self.hmin:\n print('Vertical transformation parameters are not defined correctly in either gridid.txt '\n 'or in the history files: \\n Tc\\\n line = %d and hmin = %d. \\n You need to make sure that '\n 'tcline <= hmin when using transformation 1.' % (\n self.tcline, self.hmin))\n else:\n self.hc = self.tcline\n\n zeta = None\n if zeta is None:\n self.zeta = np.zeros(self.h.shape)\n else:\n self.zeta = zeta\n\n # for findvar in ds:\n # if findvar==\"hraw\":\n # self.hraw = ds[\"hraw\"][:,:,:]\n\n self.lon_u = ds[\"lon_u\"][:, :]\n self.lat_u = ds[\"lat_u\"][:, :]\n self.mask_u = ds[\"mask_u\"][:, :]\n for findvar in ds:\n if findvar == \"lon_vert\":\n self.lon_vert = ds[\"lon_vert\"][:, :]\n self.lat_vert = ds[\"lat_vert\"][:, :]\n\n for findvar in ds:\n if findvar == \"x_rho\":\n self.x_rho = ds[\"x_rho\"][:, :]\n self.y_rho = ds[\"y_rho\"][:, :]\n\n for findvar in ds:\n if findvar == \"x_u\":\n self.x_u = ds[\"x_u\"][:, :]\n self.y_u = ds[\"y_u\"][:, :]\n\n for findvar in ds:\n if findvar == \"x_v\":\n self.x_v = ds[\"x_v\"][:, :]\n self.y_v = ds[\"y_v\"][:, :]\n\n for findvar in ds:\n if findvar == \"x_psi\":\n self.x_psi = ds[\"x_psi\"][:, :]\n self.y_psi = ds[\"y_psi\"][:, :]\n\n for findvar in ds:\n if findvar == \"x_vert\":\n self.x_vert = ds[\"x_vert\"][:, :]\n self.y_vert = ds[\"y_vert\"][:, :]\n\n for findvar in ds:\n if findvar == \"xl\":\n self.xl = ds[\"xl\"]\n self.el = ds[\"el\"]\n\n for findvar in ds:\n if findvar == \"dmde\":\n self.dmde = ds[\"dmde\"][:, :]\n self.dndx = ds[\"dndx\"][:, :]\n\n self.lon_v = ds[\"lon_v\"][:, :]\n self.lat_v = ds[\"lat_v\"][:, :]\n self.mask_v = ds[\"mask_v\"][:, :]\n\n # self.spherical = ds[\"spherical\"][:]\n\n self.lon_psi = self.lon_u[:-1, :]\n self.lat_psi = self.lat_v[:, :-1]\n self.mask_psi = self.mask_v[:, :-1]\n\n # self.f = ds[\"f\"][:, :]\n self.angle = ds[\"angle\"][:, :]\n\n self.pm = ds[\"pm\"][:, :]\n self.invpm = 1.0 / np.asarray(ds[\"pm\"][:, :])\n self.pn = ds[\"pn\"][:, :]\n self.invpn = 1.0 / np.asarray(ds[\"pn\"][:, :])\n\n self.Lp = len(self.lat_rho[1, :])\n self.Mp = len(self.lat_rho[:, 1])\n\n self.fillval = -9.99e33\n\n self.eta_rho = self.Mp\n self.eta_u = self.Mp\n self.eta_v = self.Mp - 1\n self.eta_psi = self.Mp - 1\n self.xi_rho = self.Lp\n self.xi_u = self.Lp - 1\n self.xi_v = self.Lp\n self.xi_psi = self.Lp - 1\n\n # Boolean to check if we need to initialize the CLIM file before writing\n self.ioClimInitialized = False\n self.ioInitInitialized = False\n\n if self.lon_rho.ndim == 1:\n self.lon_rho, self.lat_rho = np.meshgrid(self.lon_rho, self.lat_rho)\n self.lon_u, self.lat_u = np.meshgrid(self.lon_u, self.lat_u)\n self.lon_v, self.lat_v = np.meshgrid(self.lon_v, self.lat_v)\n\n # Setup the vertical coordinate system\n IOverticalGrid.calculateVgrid(self)\n\n self.esmfgrid_u = ESMF.Grid(filename=grd_filename, filetype=ESMF.FileFormat.GRIDSPEC,\n coord_names=['lon_u', 'lat_u'],\n is_sphere=True,\n add_mask=False)\n self.esmfgrid_v = ESMF.Grid(filename=grd_filename, filetype=ESMF.FileFormat.GRIDSPEC,\n is_sphere=True,\n coord_names=['lon_v', 'lat_v'],\n add_mask=False)\n self.esmfgrid = ESMF.Grid(filename=grd_filename, filetype=ESMF.FileFormat.GRIDSPEC,\n is_sphere=True,\n coord_names=[self.lonname, self.latname],\n 
add_mask=False)", "def __init__(self, code, start_date=\"1900-01-01\", end_date=\"2020-01-01\"):\n base = Base()\n self.datas = base.getData(\n code=code, start_date=start_date, end_date=end_date)\n self._index = 0\n self.period = 14", "def New(*args, **kargs):\n obj = itkIsoDataThresholdCalculatorHDUC.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def __init__(self, *args, **kargs):\n \n # ========== Class Data Attributes ===================================\n self.name = 'x'\n r\"\"\"(string) Name of this coordinate dimension (e.g. 'x')\"\"\"\n self.num_cells = None\n r\"\"\"(int) - Number of cells in this dimension :attr:`units`\"\"\"\n self.lower = 0.0\n r\"\"\"(float) - Lower computational dimension extent\"\"\"\n self.upper = 1.0\n r\"\"\"(float) - Upper computational dimension extent\"\"\"\n self.on_lower_boundary = None\n r\"\"\"(bool) - Whether the dimension is crossing a lower boundary.\"\"\"\n self.on_upper_boundary = None\n r\"\"\"(bool) - Whether the dimension is crossing an upper boundary.\"\"\"\n self.units = None\n r\"\"\"(string) Corresponding physical units of this dimension (e.g. \n 'm/s'), ``default = None``\"\"\"\n self.num_ghost = None\n\n # Parse args\n if isinstance(args[0],float):\n self.lower = float(args[0])\n self.upper = float(args[1])\n self.num_cells = int(args[2])\n elif isinstance(args[0],basestring):\n self.name = args[0]\n self.lower = float(args[1])\n self.upper = float(args[2])\n self.num_cells = int(args[3])\n else:\n raise Exception(\"Invalid initializer for Dimension.\")\n \n for (k,v) in kargs.iteritems():\n setattr(self,k,v)", "def _new(cls, rep, shape, domain):\n cls._check(rep, shape, domain)\n obj = object.__new__(cls)\n obj.rep = rep\n obj.shape = obj.rows, obj.cols = shape\n obj.domain = domain\n return obj", "def __init__(self, id, x, y, z):\n self.id = id\n self._dof_x = Dof(id=(id, 'u'), value=x)\n self._dof_y = Dof(id=(id, 'v'), value=y)\n self._dof_z = Dof(id=(id, 'w'), value=z)", "def __init__(self, doscar=\"DOSCAR\", poscar=\"POSCAR\", vasprun=\"vasprun.xml\", ispin=2, lmax=2,\n lorbit=11, spin_orbit_coupling=False, read_pdos=True, max=8, min=-8):\n self.filename = doscar\n self.min = min\n self.max = max\n\n self.ispin = ispin\n self.lmax = lmax\n self.spin_orbit_coupling = spin_orbit_coupling\n if self.spin_orbit_coupling:\n raise NotImplementedError('Spin-orbit coupling is not yet implemented')\n self.lorbit = lorbit\n self.read_header()\n\n start_to_read = self.number_of_header_lines\n df = pd.read_csv(self.filename,\n skiprows=start_to_read,\n nrows=self.number_of_data_points,\n delim_whitespace=True,\n names=['energy', 'up', 'down', 'integral_up', 'integral_down'],\n index_col=False)\n\n if os.path.isfile(vasprun):\n vasprun = Vasprun(vasprun)\n self.efermi = vasprun.efermi\n df[\"energy\"] = df[\"energy\"] - self.efermi\n self.energy = df.energy.values\n self.energy_name = \"E-E_fermi\"\n else:\n self.energy = df.energy.values\n self.energy_name = \"E\"\n\n df.drop('energy', axis=1)\n df.rename(columns={\"energy\": self.energy_name}, inplace=True)\n\n self.tdos = self.scale(df)\n\n if read_pdos:\n try:\n self.pdos_raw = self.read_projected_dos()\n except:\n self.pdos_raw = None\n # if species is set, should check that this is consistent with the number of entries in the\n # projected_dos dataset\n\n self.structure = Poscar.from_file(poscar, check_for_POTCAR=False).structure\n self.atoms_list = [i.name for i in self.structure.species]\n self.starts = [0, ]\n mark = self.atoms_list[0]\n 
for n, i in enumerate(self.atoms_list):\n if i == mark:\n pass\n else:\n self.starts.append(n)\n mark = i\n self.starts.append(len(self.atoms_list))", "def __init__(self, parent, label, *, firstSheet = None, title = None, filePath = None, readOnly = False, writeOnly = False, computeFormulas = True):\n\n\t\t\t#Initialize Inherited Modules\n\t\t\tsuper().__init__(child_class = self.Sheet)\n\n\t\t\t#Internal Variables\n\t\t\tself.label = label\n\t\t\tself.parent = parent\n\t\t\tself.readOnly = readOnly\n\t\t\tself.filePath = filePath\n\t\t\tself.firstSheet = firstSheet\n\t\t\tself.computeFormulas = computeFormulas\n\t\t\t\n\t\t\tself.imageCatalogue = collections.defaultdict(dict) #Used to catalogue all of the images in the document. {sheet title: {top-left corner cell (row, column): image as a PIL image}}\n\n\t\t\tself.thing = openpyxl.Workbook(write_only = writeOnly)\n\t\t\tself.title = title\n\n\t\t\tif (firstSheet != None):\n\t\t\t\tsheet = self[firstSheet]\n\t\t\t\tsheet.select()", "def __init__(self, *args, **kwargs):\n nargs = len(args) + len(kwargs)\n if nargs == 0:\n raise TypeError(\"one or more arguments required (0 given)\")\n \n first_arg = args[0]\n if isinstance(first_arg, str):\n if nargs > 2 or (nargs > 1 and \"quiet\" not in kwargs):\n raise TypeError(\n \"incorrect arguments for creating Dta from file\"\n )\n self._new_from_file(*args, **kwargs)\n elif isinstance(first_arg, Dta):\n if nargs > 3:\n raise TypeError(\n \"too many arguments to create Dta from existing Dta\"\n )\n self._new_from_dta(*args, **kwargs)\n elif isinstance(first_arg, collections.Iterable):\n self._new_from_iter(*args, **kwargs)\n else:\n raise TypeError(\"Dta cannot be created from these arguments:\")", "def __init__(self, filename=None, filetype=None, instrument=None):\n if filename:\n if instrument == 'Element':\n skipfooter = 4\n header = 1\n drop = 9\n elif instrument == 'Agilent':\n skipfooter = 4\n header = 3\n drop = 3\n else:\n skipfooter = 0\n header = 0\n drop = 0\n\n if filetype == 'xlsx':\n pwd = os.getcwd()\n os.chdir(os.path.dirname(filename))\n self.imported = pd.ExcelFile(filename)\n self.data = self.imported.parse(\n 0, index_col=0, skipfooter=skipfooter, header=header)\n self.data = self.data.drop(self.data.index[:drop], axis=0)\n os.chdir(pwd)\n # TODO xlsx doesnt work with agilent type\n elif filetype == 'csv':\n pwd = os.getcwd()\n os.chdir(os.path.dirname(filename))\n self.data = pd.read_csv(filename, sep=',', index_col=0, skipfooter=skipfooter,\n header=header, engine='python')\n os.chdir(pwd)\n elif filetype == 'asc':\n pwd = os.getcwd()\n os.chdir(os.path.dirname(filename))\n self.data = pd.read_csv(filename, sep='\\t', index_col=0, skipfooter=skipfooter,\n header=header, engine='python')\n self.data = self.data.drop(self.data.index[:drop], axis=0)\n self.data.dropna(axis=1, how='all', inplace=True)\n self.data = self.data.apply(pd.to_numeric, errors='coerce')\n os.chdir(pwd)\n else:\n warnings.warn('File type not supported.')\n\n self.data.index = self.data.index.astype('float32')\n self.time = self.data.index\n self.elements = list(map(elem_resolution, self.data.columns))\n self.data.columns = self.elements\n\n self.srms = pd.ExcelFile('./SRM.xlsx').parse(index_col=0)\n self.sum_koeficients = pd.ExcelFile(\n './default_sum_koef.xlsx').parse(0, index_col=0, header=None).to_dict()[1]\n\n self.srm = None\n self.iolite = None\n self.names = None\n self.internal_std = None\n self.ablation_time = None\n\n self.laser_off = []\n self.laser_on = []\n self.skip = {'bcg_start': 0,\n 
'bcg_end': 0,\n 'sample_start': 0,\n 'sample_end': 0} # time in seconds to skip from each bcg and sample\n\n self.filter_line = None\n self.starts = None\n self.ends = None\n self.bcg = None\n self.average_peaks = None\n self.ratio = None\n self.quantified = None\n self.lod = None\n self.correction_elements = None\n self.corrected_IS = None\n self.corrected_SO = None\n\n self.dx = None\n self.dy = None\n self.maps = {}\n self.qmaps = {}\n\n self.regression_values = {}\n self.regression_equations = {}", "def __init__(self, angle = 'deg'):\n \n name = \"Cylindrical\"\n Qstr = [\"r\", \"phi\", \"z\"]\n Xstr = [\"x\", \"y\", \"z\"]\n \n super().__init__(self._csCylindrical_q2x, nQ = 3,\n nX = 3, name = name, \n Qstr = Qstr, Xstr = Xstr,\n maxderiv = None, isatomic = False,\n zlevel = None)\n \n if angle == 'deg' or angle == 'rad':\n self.angle = angle # 'deg' or 'rad'\n else:\n raise ValueError('angle must be ''deg'' or ''rad''.')", "def _constructor(self):\n return dnpdata_collection", "def __init__(self, coord_sys='Cartesian'):\r\n self.name = None \r\n self.chem = None\r\n self.nSpecies = None\r\n self.conc = np.array([0])\r\n self.Kw = None\r\n self.D = None \r\n self.x_coord = None\r\n self.y_coord = None\r\n self.dx = None\r\n self.dy = None\r\n self.dz = None\r\n self.coord_sys = coord_sys", "def __init__(self):\n self.project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n self.excel_file = os.path.join(self.project_dir, \"data\", \"Literature_Data.xlsx\")\n self.spreadsheet_name = \"Individualized Data\"\n self.filled_output_file = os.path.join(self.project_dir, \"data\", \"filled_data.csv\")\n self.output_file = os.path.join(self.project_dir, \"data\", \"final.csv\")\n self.use_fake_data = False # For testing\n # This instance value \"self.df\" is the pandas DataFrame that contains all of the data\n # from the literature case studies. Manipulating this field is the purpose of this class.\n \n self.num_negative = 500\n self.df = None", "def __init__(self,obs,**kwd):\n from PyAnUtils.pyanfunctions import ExtraOpt\n \n # Extra options\n extraopt = ExtraOpt( [('readws',None),('modeltype',None),\n ('modelname',None)] )\n extraopt.setkwd(kwd)\n \n \n # Initialize dictionary of models, which must be\n # associated to an observable. 
Also, the hashed\n # name of the pdf used is stored\n self.__models = {}\n self.__pdftypes = {}\n\n # Constructor from a Workspace\n if extraopt.readws: \n out = extraopt.readws\n if type(obs) != str:\n raise RuntimeError(\"Initialization with the 'readws' keyword\"\\\n \" requires the first argument to be the name of the\"\\\n \" observable (str)\")\n if not out[2].has_key(obs):\n raise AttributeError(\"Observable '%s' not found in the \"\\\n \"Workspace\" % obs)\n self.__observable = obs\n self.__setattr__(self.__observable,out[2][obs])\n self.__setattr__\n # Set up the models, need info from the user\n # --- The modeltype (bkg,signal,..)\n if not extraopt.modeltype:\n raise RuntimeError(\"Initialization with the 'readws' keyword\"\\\n \" requires another keyword 'modeltype' to be set\")\n # --- the actual internal name \n if not extraopt.modelname:\n raise RuntimeError(\"Initialization with the 'readws' keyword\"\\\n \" requires another keyword 'modelname' to be set\")\n # --- Set up the models\n self.__setupmodelsfromws(out,extraopt.modeltype,extraopt.modelname)\n # Regular constructor\n else:\n # Initialize observable put the name as attribute\n self.__observable = obs.GetName()\n self.__setattr__(obs.GetName(),obs)", "def __init__(self, lbda=None, bandname=None, zp=None, \n mjd=None, empty=False,**kwargs):\n self.__build__()\n if empty:\n return\n prop = kwargs_update(dict(lbda=lbda, bandname=bandname,mjd=mjd, zp=zp),\n **kwargs)\n self.create(**prop)", "def __init__(self, headers, Y, X, prefix, restype='FCELL'):\n if len(set(headers)) != len(headers):\n grass.error('The names of the variables are not unique!')\n\n self.mtype = restype\n\n self.x_headers = headers[1:] # Names of the coefficient\n self.b_names = [prefix + name for name in self.x_headers]\n self.y_names = Y # Names of Y rasters\n self.x_names = X # Names of X rasters\n\n self.sample_count = len(self.y_names)\n self.factor_count = len(self.x_names[0])\n\n self._y_rasters = []\n self._x_rasters = []\n self._b_rasters = []\n self._init_rasters()", "def __init__(self, file_path):\n # will raise an error if the path is invalid, we don't need an\n # if statement here\n df = pandas.read_excel(file_path)\n\n \"\"\"\n read in the cities using a dictionary comprehension\n dictionary = { key: value for elem in iterable }\n In this case we are reading in the name of the city as the key\n and its corresponding CityLocation object as the value. 
We\n have made the assumption that each city has a unique name.\n \"\"\"\n #\n self.lulu = [Order(row[1][\"Date\"], row[1][\"Order Number\"],\n row[1][\"Brand\"], row[1][\"Garment\"],\n row[1][\"Count\"], row[1][\"Style name\"])\n for row in df.iterrows()\n if row[1][\"Brand\"] == \"Lululime\"]\n self.lulu = LululimeFactory()\n GarmentMaker(self.lulu)\n # brand = self.lulu[0].brand\n # garment = self.lulu[0].garment\n # print(brand)\n # print(garment)\n\n lulu_order = ((row[1][\"Date\"], row[1][\"Garment\"],\n row[1][\"Brand\"], row[1][\"Garment\"])\n for row in df.iterrows() if\n row[1][\"Brand\"] == \"Lululime\")\n for item in lulu_order:\n print(item)\n\n lulu_order = LululimeFactory()\n # test = GarmentMaker(lulu_order)\n # print(test)\n\n # for lulu in self.lulubrand:\n # print(lulu)\n # print(*self.lulubrand)", "def create(self, lbda=None, source=None, instrument_name=None,\n mjd=None, zp=None, bandname=None, zpsys=\"ab\",\n force_it=False, **meta):\n self._properties[\"lbda\"] = np.float(lbda) if lbda is not None else None\n self._side_properties[\"source\"] = source\n self._side_properties[\"instrument_name\"] = instrument_name\n self._side_properties[\"meta\"] = meta\n # -- Interactive ones\n self._side_properties[\"zpsys\"] = zpsys\n self.mjd = mjd\n self.zp = zp\n self.set_bandname(bandname)\n self._update_()", "def __init__(self, *args):\n this = _ida_hexrays.new_cif_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(__self__, *,\n database_name: pulumi.Input[str],\n table_catalog_id: pulumi.Input[str],\n table_name: pulumi.Input[str],\n column_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n column_wildcard: Optional[pulumi.Input['DataCellsFilterColumnWildcardArgs']] = None,\n name: Optional[pulumi.Input[str]] = None,\n row_filter: Optional[pulumi.Input['DataCellsFilterRowFilterArgs']] = None):\n pulumi.set(__self__, \"database_name\", database_name)\n pulumi.set(__self__, \"table_catalog_id\", table_catalog_id)\n pulumi.set(__self__, \"table_name\", table_name)\n if column_names is not None:\n pulumi.set(__self__, \"column_names\", column_names)\n if column_wildcard is not None:\n pulumi.set(__self__, \"column_wildcard\", column_wildcard)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if row_filter is not None:\n pulumi.set(__self__, \"row_filter\", row_filter)", "def __init__(self, number):\n super(DSS,self).__init__()\n self.number = number\n dsn = get_geodetic_coords()\n self.lon = -dsn[number][0]*math.pi/180.\n self.lat = dsn[number][1]*math.pi/180.\n self.elevation = dsn[number][2]\n self.timezone = dsn[number][3]\n self.name = dsn[number][4]\n self.diam = dsn[number][5]\n xyz = get_cartesian_coordinates()\n self.xyz = xyz[\"DSS %2d\" % number]", "def __init__(self, data):\n self.data = data\n self.columns = Columns(data)\n self.rows = Rows(data)", "def __init__(self,dos):\n self.e = self.double(dos.e,-1.0)\n self.g = self.double(dos.g)\n self.gz = self.double(dos.gz)\n self.cutoffInd = dos.cutoffInd\n self.cutoff = dos.cutoff\n self.de = dos.de", "def __init__(self, symbol=None, Z=None,\n mass=None,\n displacement=None,\n force=None, momentum=None, velocity=None,\n magmom=0.0):\n\n \n self._properties={}\n \n if symbol is None:\n if Z is None:\n raise ValueError, 'Missing symbol or atomic number!'\n symbol = symbols[Z]\n else:\n if Z is None:\n Z = numbers[symbol]\n else:\n if symbols[Z] != symbol:\n raise ValueError, 'Incompatible atomic number and symbol'\n self._properties[\"Z\"] = Z\n 
self._properties[\"symbol\"] = symbol\n\n if mass is None:\n mass = masses[Z]\n self._properties[\"mass\"] = mass\n\n # below we need to check that we pass valid arg to numpy array ctor. do later.\n if displacement is None:\n self._properties[\"displacement\"] = np.array( (0.0, 0.0, 0.0) )\n else:\n self._properties[\"displacement\"] = np.array(displacement)\n\n if force is None:\n self._properties[\"force\"] = np.array( (0.0, 0.0, 0.0) )\n else:\n self._properties[\"force\"] = np.array(force)\n\n if momentum is None:\n if velocity is None:\n self._properties[\"momentum\"] = np.array((0.0, 0.0, 0.0))\n else:\n self._properties[\"velocity\"] = np.array(velocity)\n else:\n if velocity is not None:\n raise ValueError, \"You can't set both momentum and velocity!\"\n self._properties[\"momentum\"] = np.array(momentum)\n\n self._properties[\"magmom\"] = float(magmom)", "def __init__(self, data, dims, legends=None, tags={}, name=None):\n self.data = data\n self.dims = dims\n \n if legends == None:\n self.legends = [None] * len(self.dims)\n else:\n self.legends = legends[:]\n self.num_cells = float(data.shape[0])\n if type(tags) == str:\n raise Exception('tags must be dict')\n self.tags = tags.copy()\n if name != None:\n self.tags['name'] = name", "def __init__(self, *args):\n this = _ida_hexrays.new_cnumber_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, nrows):\n _table.DSTable_swiginit(self, _table.new_DSTable(nrows))", "def __init__(self, name, rootdir=None, create=True):\n # Guess the dtype based on the class name\n dtype = FreezableAPI.guess_type(self)\n self.m80 = FreezableAPI(dtype, name, rootdir)", "def _new_instance(cls, data, wcs, errors=None, **kwargs):\n return cls(data, wcs, errors=errors, **kwargs)", "def __init__(self, \n modeled_dem_name, \n modern_dem_name, \n outlet_id, \n category_file=None, \n category_values=None, \n weight_file=None,\n weight_values=None):\n\n # save dem names\n self.modern_dem_name = modern_dem_name\n self.modeled_dem_name = modeled_dem_name\n \n # Read and remember the modern DEM\n (self.grid, self.z) = self.read_topography(modern_dem_name)\n self.grid.set_watershed_boundary_condition_outlet_id(outlet_id,\n self.z, \n nodata_value=-9999)\n # Read and remember the modeled DEM \n (self.mgrid, self.mz) = self.read_topography(modeled_dem_name)\n self.mgrid.set_watershed_boundary_condition_outlet_id(outlet_id,\n self.mz, \n nodata_value=-9999)\n if self.mz.size != self.z.size:\n raise ValueError(('Size of provided DEMS is different.'))\n \n if category_file and category_values:\n raise ValueError(('Provide either an array-like structure of catetory ',\n 'values or a filename, not both.'))\n if weight_file and weight_values:\n raise ValueError(('Provide either an array-like structure of weight ',\n 'values or a filename, not both.'))\n if category_file:\n if os.path.exists(category_file):\n catagory_values = np.loadtxt(category_file)\n if catagory_values.size != self.z.size:\n raise ValueError(('Size of catagory array is different than the ',\n 'provided DEM.'))\n if weight_file:\n if os.path.exists(weight_file):\n weight_values = np.loadtxt(weight_file)\n if weight_values.size != self.z.size:\n raise ValueError(('Size of weight array is different than the ',\n 'provided DEM.'))\n try:\n np.asarray(weight_values).size == self.z.size \n except TypeError:\n weight_values = np.ones_like(self.z)\n \n self.category_values = category_values\n self.weight_values = weight_values\n self.cat_vals = 
np.sort(np.unique(self.category_values[self.grid.core_nodes]))\n self.metric = {}", "def __init__(\n self,\n system,\n class_name,\n header_path_prefix,\n header_extension,\n period_variant=False,\n ):\n self.system = system\n self.class_name = class_name\n self.header_path_prefix = header_path_prefix\n self.header_extension = header_extension\n template = (\n \"<\"\n + str(system.sysd.A.shape[0])\n + \", \"\n + str(system.sysd.B.shape[1])\n + \", \"\n + str(system.sysd.C.shape[0])\n + \">\"\n )\n\n self.period_variant = period_variant\n if period_variant:\n self.class_type = \"PeriodVariant\"\n self.plant_coeffs_header = \"PeriodVariantPlantCoeffs\"\n self.obsv_coeffs_header = \"PeriodVariantKalmanFilterCoeffs\"\n self.loop_header = \"PeriodVariantLoop\"\n else:\n self.class_type = \"StateSpace\"\n self.plant_coeffs_header = \"StateSpacePlantCoeffs\"\n self.obsv_coeffs_header = \"StateSpaceObserverCoeffs\"\n self.loop_header = \"StateSpaceLoop\"\n\n self.ctrl_coeffs_header = \"StateSpaceControllerCoeffs\"\n self.ctrl_coeffs_type = \"frc::\" + self.ctrl_coeffs_header + template\n self.plant_coeffs_type = \"frc::\" + self.plant_coeffs_header + template\n self.obsv_coeffs_type = \"frc::\" + self.obsv_coeffs_header + template\n self.loop_type = \"frc::\" + self.loop_header + template", "def __init__(self, fPath, tRange=None):\n self.rawD = pycdf.CDF(fPath)\n #print(self.d)\n self.tRange = tRange\n\n if self.tRange is not None:\n self.d = {}\n self._filterTimes()\n else:\n self.d = self.rawD.copy()\n\n\n return", "def __init__(self, styles, nature):\n #: Dictionary of key-value pairs, where *keys* are the style names.\n self.styles = styles\n\n #: Cell *nature* used to distinguish the body cells, from the header and the footer.\n self.nature = nature", "def __init__(self, symb, n_days, begin_date=None, end_date=None):\n if n_days == '':\n n_days = 7\n self.symb = symb\n self.original_data = retrieve_stock_info(self.symb)\n if begin_date == '':\n begin_date = None\n if end_date == '':\n end_date = None\n self.begin_date, self.end_date = begin_date, end_date\n self.data = get_subset_dates(self.original_data, begin_date=begin_date,\n end_date=end_date)\n self.data = normalize_data(self.data)\n self.fill_dataset()\n self.n_days = n_days", "def __init__(self, cells):\n self.cells = cells\n # Used by MakeTableData for layout.\n self.idx = None\n self.group = None\n self.rows_in_group = None\n self.starred = None", "def __init__(self):\n self.ascii_filename = None\n\n # name and calc_id\n self.name = None\n self.calc_id = None\n self.i_ts = None\n self.time = None\n # data members\n self.x = None\n self.p = None\n # particle number density normalized to nGJ: n/n_{GJ} \n self.fmci_XP = None\n # PSR parameters\n self.PSR_P = None\n self.PSR_B12 = None\n self.PSR_Lcm = None\n self.PSR_Theta = None\n self.PSR_Chi = None", "def CreateDataContainer(name):\n dc = simpl.DataContainer.New(name)\n return dc", "def build(self):\n\n # Create a custom grid, fe_set \n nfe = 6\n fe_a = 1/4.0\n fe_b = 0.2\n fe_set = [0, 0.004]\n for i in range(1,nfe+1):\n if i < nfe*fe_a:\n fe_set.append(i*fe_b/(nfe*fe_a))\n elif i == nfe: \n fe_set.append(1)\n else:\n fe_set.append(fe_b + (i-nfe*fe_a)*(1-fe_b)/(nfe*(1-fe_a)))\n\n \"\"\"\n Args:\n dae_method = method to use for calcuating derivatives (default = OCLR)\n - BFD1 - 1st order backwards finite difference\n - OCLR - Orthogonal collocation, Lagrange-Radau\n - OCLL - Orthogonal collocation, Lagrange-Legendre\n press_drop = Pressure drop correlation for superficial velocity 
calc.\n - SimplifiedP - simplified pressure correlations \n - Ergun - Ergun equation\n fe_set = set of normalised finite element locations\n nfe = number of finite elements for bed discretization (default = 15)\n (not used if fe_set specified)\n ncp = number of collocation points (OCLR or OCLL only, default = 3)\n \"\"\" \n\n # Create unit model for fuel reactor\n self.MB_fuel = MB_CLC_fuel.MB(\n parent=self,\n dae_method = 'OCLR',\n press_drop = 'Ergun',\n fe_set = fe_set,\n ncp = 3)", "def __init__(self, name, geometry, envelope, typology, hvac,\n rc_model, comfort, internal_loads, age, solar, supply):\n\n self.name = name\n self.geometry = geometry\n self.geometry['floor_height'] = calc_floor_to_floor_height(self.geometry['height_ag'],\n self.geometry['floors_ag'])\n self.architecture = EnvelopeProperties(envelope)\n self.typology = typology # FIXME: rename to uses!\n self.hvac = hvac\n self.rc_model = rc_model\n self.comfort = comfort\n self.internal_loads = internal_loads\n self.age = age\n self.solar = SolarProperties(solar)\n self.supply = supply\n self.building_systems = self._get_properties_building_systems()", "def create_cells(self):\n raise NotImplementedError(\n \"create_cells function not reimplemented from base class\")", "def create(self):\n # TODO: Properly validate data\n self._proj()\n if self.cfg.align_heading:\n self._align()\n self._griddata()\n if self.cfg.gap_filter[\"algorithm\"] != \"none\":\n self._gap_filter()", "def __init__(self):\n super().__init__()\n self.p = 0.0\n self.type = 'Geometric'\n self.distType = 'Discrete'\n self.lowerBound = 0.0\n self.upperBound = 1.0\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def __init__(self, number_doors, registration_number, make,\n model, year_manufactured, maximum_speed,\n acceleration_rate, deceleration_rate):\n\n self.number_doors = number_doors\n self.registration_number = registration_number\n self.make = make\n self.model = model\n self.year_manufactured = year_manufactured\n self.maximum_speed = maximum_speed\n self.acceleration_rate = acceleration_rate\n self.deceleration_rate = deceleration_rate\n self.mileage_miles = 0\n self.speed_mph = 0", "def __init__(self, infile, sheet, header=True, date_format=\"%Y-%m-%d\"):\n from openpyxl import load_workbook\n \n wb = load_workbook(infile, data_only=True, use_iterators=True, keep_vba=False)\n \n try:\n sheet = wb.worksheets[int(sheet)-1]\n \n except:\n for ws in wb.worksheets:\n if ws.title == sheet:\n sheet = ws\n break\n else:\n raise Exception(\"No worksheet named {0}\".format(sheet))\n \n self.iter = sheet.iter_rows()\n self.date_format = date_format\n \n if header:\n self.fieldnames = self.format_excel_row(self.iter.next())\n self.aliases = dict(zip(\n list(\"col{0}\".format(idx+1) for idx in xrange(len(self.fieldnames))),\n self.fieldnames))\n else:\n self.fieldnames = list(\"col{0}\".format(idx+1) for idx in xrange(len(sheet.columns))),\n self.aliases = None", "def data_frame_creator(self):\n\n return pd.DataFrame()", "def __init__(self,file_path):\n\t\tdata_reader = csv.DictReader(file(file_path,'rU'))\n\t\tfor row in data_reader:\n\t\t\t# we have to turn the strings into floating point numbers.\n\t\t\tc = Compound( name = row['Name'],\n\t\t\t Antoine_params = [float(row['Antoine A']),float(row['Antoine B']),float(row['Antoine C'])],\n\t\t\t mass_density = float(row['Mass Density']),\n\t\t\t MW = float(row['Molecular Weight']),\n\t\t\t #Hvap = float(row['Enthalpy of 
Vaporization']),\n\t\t\t Cp = float(row['Molar Heat Capacity']) )\n\t\t\t# place it in the dictionary\n\t\t\t#print \"Have just read in \",c\n\t\t\tself[c.name] = c", "def __init__(self, data_source, min_sup=MIN_SUPPORT, eq=False):\n self.thd_supp = min_sup\n \"\"\":type thd_supp: float\"\"\"\n self.equal = eq\n \"\"\":type eq: bool\"\"\"\n self.titles, self.data = DataGP.read(data_source)\n \"\"\":type titles: ndarray\"\"\"\n \"\"\":type data: ndarray\"\"\"\n self.row_count, self.col_count = self.data.shape\n self.time_cols = self.get_time_cols()\n self.attr_cols = self.get_attr_cols()\n self.valid_bins = np.array([])\n self.no_bins = False\n self.step_name = '' # For T-GRAANK\n self.attr_size = 0 # For T-GRAANK", "def __init__(self, label_num, des_dir, des_dim=48):\n self.label_num = label_num\n self.des_dir = des_dir\n self.des_dim = 48", "def from_data(cls, ID, ra, dec, origin, proba=None, confid=None,\n extras=None, **kwargs):\n header = pyfits.Header()\n header['ID'] = (ID, 'object ID %d')\n header['RA'] = (ra, 'RA u.degree %.7f')\n header['DEC'] = (dec, 'DEC u.degree %.7f')\n header['FROM'] = (origin[0], 'detection software')\n header['FROM_V'] = (origin[1], 'version of the detection software')\n header['CUBE'] = (os.path.basename(origin[2]), 'datacube')\n header['CUBE_V'] = (origin[3], 'version of the datacube')\n if proba is not None:\n header['DPROBA'] = (proba, 'Detection probability')\n if confid is not None:\n header['CONFID'] = (confid, 'Confidence index')\n if extras is not None:\n header.update(extras)\n\n return cls(header, filename=None, **kwargs)", "def _instantiate(clz, **data):\n\n new_obj = clz()\n setattr(new_obj, \"data\", data)\n for key, val in deepcopy(data).items():\n setattr(new_obj, key, val)\n return new_obj", "def __init__(self,cell1,site1,cell2,site2,color=None,label=None,linewidth=None,linetype=None):\n self.cell1 = np.array(cell1)\n self.site1 = site1\n self.cell2 = np.array(cell2)\n self.site2 = site2\n coordinate1 = None\n coordinate2 = None\n self.color = color\n self.label = label\n self.linewidth = linewidth\n self.linetype = linetype", "def makecldf(args):\n with_dataset(args, Dataset._install)", "def __init__(self, dtype, name, rootdir=None):\n # Set the m80 name\n self.name = FreezableAPI.validate_freezable_name(name)\n # Set the m80 dtype\n self.dtype = FreezableAPI.validate_freezable_name(dtype)\n\n # Default to the rootdir in the config file\n if rootdir is None:\n self.basedir = Path(cf.options.rootdir).expanduser() / \"datasets\" / API_VERSION / self.slug\n else:\n self.basedir = Path(rootdir).expanduser() / self.slug\n\n # Create the base dir\n os.makedirs(self.basedir, exist_ok=True)\n os.makedirs(self.thawed_dir, exist_ok=True)\n os.makedirs(self.frozen_dir, exist_ok=True)\n\n # init the manifest\n self.manifest\n\n self._col = None\n self._db = None\n self._doc = None", "def __init__(self, input_file):\n self.file_name = input_file\n # Import the excel file:\n self.xlfile = ExcelFile(self.file_name) # to retrieve & work w/ input", "def __init__(self, name, pair_instance, dof_cls):\n self._name = name\n self._dof_cls = dof_cls\n self._pair_instance = pair_instance\n self._indexer = hoomd.data.parameterdicts._SmartTypeIndexer(2)\n self._data = {}", "def __init__(self, name=None, dss=28, date=None, project='SolarPatrol'):\n self.logger = logging.getLogger(logger.name+\".Observation\")\n DR.Observation.__init__(self, name=date, date=date, dss=dss, \n project=project)\n self.extended_init()\n \n #self.obs =Astronomy.Ephem.DSS(dss)\n #y,d = 
date.split('/')\n #self.year = int(y); self.DOY = int(d)\n #projdatapath, self.sessionpath, rawdatapath = \\\n # DR.get_obs_dirs(project, dss, self.year, self.DOY,\n # datafmt=None)", "def __init__(self, data):\n\n self.__data = np.array(data, dtype=object)\n\n # Get number of rows / columns\n self.__nrows, self.__ncols = self.__data.shape\n\n # Construct the cells\n grid = []\n for i in range(self.__nrows):\n row = []\n for j in range(self.__ncols):\n dcol = self.__data[i, j]\n if dcol is None:\n row.append(cell(i, j, black=True))\n elif dcol == 0:\n row.append(cell(i, j))\n else:\n bot, rig = dcol\n if bot is not None:\n cs = []\n for ii in range(i + 1, self.__nrows):\n if self.__data[ii, j] != 0:\n break\n cs.append((ii, j))\n bot = (bot, tuple(cs))\n if rig is not None:\n cs = []\n for jj in range(j + 1, self.__ncols):\n if self.__data[i, jj] != 0:\n break\n cs.append((i, jj))\n rig = (rig, tuple(cs))\n row.append(cell(i, j, bottom=bot, right=rig))\n grid.append(row)\n self.__tuple = tuple(tuple(row) for row in grid)", "def __init__(self, config):\n \n self.gcs_client = storage.Client()\n self.data = pd.DataFrame()\n\n self.serie_name = config['serie']\n self.start_date = config['start_date']\n self.end_date = config['end_date']\n self.path = config['raw_path']\n self.bucket = config['bucket']", "def __init__(self, sys, rcut, pad):\n self.sys = sys \n self.rcut = rcut \n self.pad = pad\n self.cell_list = CellList(self.sys.box, self.rcut + self.pad)", "def __init__(self, name=None, sheet_name=None, fetch_grid=None, readonly=True):\n if name is not None:\n self.name = name\n if sheet_name is not None:\n self.sheet_name = sheet_name\n\n if fetch_grid is not None:\n self.fetch_grid = fetch_grid\n\n self.readonly = readonly\n self._setup()\n self.fetch()", "def __init__(self):\n\n self.ptr = c_void_p()\n self.obj = byref(self.ptr)\n # container holding attached data\n self.data = {}\n\n LIB.mnt_grid_new.argtypes = [POINTER(c_void_p)]\n ier = LIB.mnt_grid_new(self.obj)\n if ier:\n error_handler(FILE, '__init__', ier)", "def __init__(self, *args):\n this = _ida_hexrays.new_citem_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args, **kwargs):\n try:\n geometry = kwargs['geometry']\n except:\n pass\n # ...\n dim = geometry.dim\n if dim == 1:\n func_one = lambda x : [ 1. ]\n func_zero = lambda x : [ 0. ]\n func_stiff = lambda x : [ 1. ]\n if dim == 2:\n func_one = lambda x,y : [ 1. ]\n func_zero = lambda x,y : [ 0. ]\n func_stiff = lambda x,y : [ 1., 0. \\\n , 0., 1. ]\n if dim == 3:\n func_one = lambda x,y,z : [ 1. ]\n func_zero = lambda x,y,z : [ 0. ]\n func_stiff = lambda x,y,z : [ 1., 0., 0. \\\n , 0., 1., 0. \\\n , 0., 0., 1. 
]\n # ...\n\n # ...\n tc_d = {}\n tc_d['A'] = func_stiff\n tc_d['b'] = func_zero\n try:\n tc_d['AllDirichlet'] = kwargs['AllDirichlet']\n except:\n pass\n try:\n tc_d['bc_dirichlet'] = kwargs['bc_dirichlet']\n except:\n pass\n try:\n tc_d['bc_neumann'] = kwargs['bc_neumann']\n except:\n pass\n try:\n tc_d['Metric'] = kwargs['Metric']\n except:\n pass\n # ...\n\n # ...\n poisson.__init__(self, *args, **kwargs)\n self.Dn = basicPDE(geometry=geometry, testcase=tc_d)\n # ...\n\n # ...", "def __init__(self, scope = 100):\n\t\tself.toDICE, self.toTYPE, self.toWORD = self.compileTypes()\t# Compile mappings for TYPE/DICE, WORD/TYPE, and TERM/WORD for DICESearch Class\t\n\t\tsuper(DICESearch, self).__init__()\t\t\t\t\t\t\t# Initiate SpreadsheetSearch for DICESearch Class\n\n\t\tself.addColumn(\"Document\", length = 1)\t# Column 01 (A) to contain name of Document processed\n\t\tself.addColumn(\"DICECode\")\t\t\t\t# Column 02 (B) to contain associated DICE Code with excerpt\n\t\tself.addColumn(\"TYPECode\")\t\t\t\t# Column 03 (C) to contain associated TYPE Code with excerpt\n\t\tself.addColumn(\"Combo\")\t\t\t\t\t# Column 04 (D) to contain presence of a combination of search terms; TRUE or FALSE\n\t\tself.addColumn(\"documentIndex\")\t\t\t# Column 05 (E) to contain location of kernel in document\n\t\tself.addColumn(\"kernelLeft\")\t\t\t# Column 06 (F) to contain excerpt left of kernel with length of scope\n\t\tself.addColumn(\"kernel\")\t\t\t\t# Column 07 (G) to contain kernel (i.e., search term)\n\t\tself.addColumn(\"kernelRight\")\t\t\t# Column 08 (H) to contain excerpt right of kernel with length of scope\n\t\tself.addColumn(\"comboTerms\")\t\t\t# Column 09 (I) to contain associated combination terms found\n\t\tself.addColumn(\"proximity\")\t\t\t\t# Column 10 (J) to contain distance between combination terms if applicable\n\t\tself.addColumn(\"sufficiency\")\t\t\t# Column 11 (K) to contain sufficiency status of kernel and excerpt\n\t\tself.addColumn(\"term\")\t\t\t\t\t# Column 12 (L) to contain associated terms if sufficiency is 1 (i.e., TRUE)\n\n\t\tself.scope = scope\t\t\t\t\t\t# Range of text before and behind the kernel to include", "def new(headings: Optional[List[Any]] = None,\n data: Optional[List[List[Any]]] = None,\n style: Text = 'default',\n align: Text = None,\n device: Text = 'stdout',\n dataframe=None) -> TableD: # mypy: ignore\n\n if dataframe is not None:\n headings = list(dataframe.columns)\n data = dataframe.values.tolist()\n\n return TableD(headings or [], data or [], style, align, device)", "def __init__(self, crystal, wavelength=1.54184, max2theta=180):\n self.wavelength = wavelength\n self.max2theta = np.radians(max2theta)\n self.name = crystal.name\n self.all_dhkl(crystal)\n self.atom_scatter(crystal)\n self.structure_factor(crystal)\n self.rec_matrix = crystal.rec_matrix\n self.intensity()\n self.pxrd()", "def __init__(self, start=300, end=1000, AM15G_excel=AM15PATH):\n self.PLANK_CONST = 6.626e-34 #[J/s]\n self.LIGHT_SPEED = 299792458 #[m/s]\n self.ELEM_CHARGE = 1.602e-19 #[C]\n \n self.wl = np.linspace(start, end, int(end - start + 1))\n \n am15g_df = pd.read_excel(AM15G_excel, sheet = 'SMARTS2', skiporiows = 0, header = 1)\n am15g_wl = am15g_df.ix[:,0] #[nm]\n am15g_power = am15g_df.ix[:,2] #[W/m^2/nm]\n am15g_f = interpolate.interp1d(am15g_wl, am15g_power)\n self.spec = am15g_f(self.wl)", "def __init__(self, name, *args, **kwargs):\n MethodProxy.__init__(self)\n self._varexp = None\n self._cuts = None\n self._weight = None\n self._errorband = None\n self._drawoption = 
\"\"\n self._drawerrorband = False\n self._addtolegend = True\n self._legenddrawoption = \"\"\n self._stack = False # Stack property!\n self._attalpha = defaultdict(lambda: 1.0)\n self._includeoverflow = False\n self._includeunderflow = False\n if len(args) == 1:\n if args[0].InheritsFrom(\"TH1\"):\n ROOT.TH1D.__init__(self)\n args[0].Copy(self)\n self.SetDirectory(0)\n self.SetName(name)\n if isinstance(args[0], Histo1D):\n self._varexp = args[0]._varexp\n self._cuts = args[0]._cuts\n self._weight = args[0]._cuts\n self._stack = args[0]._stack\n if args[0]._errorband is not None:\n self._errorband = Histo1D(\n \"{}_errorband\".format(name), args[0]._errorband\n )\n if not name.endswith(\"_errorband\"):\n self.DeclareProperties(**args[0].GetProperties())\n self.DeclareProperties(\n **args[0]._errorband.GetProperties(prefix=\"errorband\")\n )\n elif len(args) == 2:\n assert isinstance(args[0], str)\n assert isinstance(args[1], (list, tuple))\n lowbinedges = array(\"d\", args[1])\n ROOT.TH1D.__init__(self, name, args[0], len(lowbinedges) - 1, lowbinedges)\n elif len(args) == 4:\n assert isinstance(args[0], str)\n assert isinstance(args[1], int)\n ROOT.TH1D.__init__(self, name, *args)\n else:\n raise TypeError\n if not name.endswith(\"_errorband\") and self._errorband is None:\n self._errorband = Histo1D(\"{}_errorband\".format(self.GetName()), self)\n for key, value in self.GetTemplate(\n kwargs.get(\"template\", \"common\")\n ).items():\n kwargs.setdefault(key, value)\n self.DeclareProperties(**kwargs)\n self._lowbinedges = IOManager._getBinning(self)[\"xbinning\"]\n self._nbins = len(self._lowbinedges) - 1", "def __init__(self,workspace,gridin,df,csim):\n #Constructor begins here\n self.__workspace=workspace\n self.__grid=gridin\n self.__dflst=pd.read_csv(workspace+'WELL\\\\'+'filelist.csv',sep=',')\n print('*--- Preparing simulation, reading csv wells ---*')\n self.__dfwells=pd.read_csv(workspace+'WELL\\\\'+'arraydata.csv',sep=',')\n self.__nwells=self.__dflst.shape[0]\n self.__df=df\n self.__dfclst={}\n self.__dfcwells={}\n self.__ncwells=0\n if(csim==True):\n self.buildCwells()", "def __init__(self, object_list, table_name, crowdcontext):\n self.cc = crowdcontext\n self.data = {'id': range(len(object_list)), 'object':object_list}\n self.start_id = len(object_list)\n self.cols = [\"id\", \"object\"]\n self.table_name = table_name\n self.presenter = None\n self.project_id = None\n self.project_short_name = None\n self.project_name = None\n\n if type(object_list) is not list:\n raise Exception(\"'object_list' should be a list\")\n if table_name not in self.cc.show_tables():\n try:\n exe_str = \"CREATE TABLE '%s' (id integer, col_name BLOB, value BLOB DEFAULT NULL, PRIMARY KEY(id, col_name))\" %(table_name)\n self.cc.cursor.execute(exe_str)\n except sqlite3.OperationalError:\n raise", "def from_dict(cls, odic):\n return dacite.from_dict(data_class=cls, data=odic)", "def __init__(self, name, type_name, delta, b, D):\n self.name = name\n self.type = type_name\n self.length = 0\n self.delta = delta\n self.b = b\n self.diameter = D", "def __init__( self, xh ):\n super( Domain, self ).__init__( xh )\n self.dg = 0.5*self.dxh # cell jacobian\n return", "def create_from_hdu(cls, hdu, ebins):\n hpx = HPX.create_from_hdu(hdu, ebins)\n colnames = hdu.columns.names\n cnames = []\n if hpx.conv.convname == 'FGST_SRCMAP_SPARSE':\n pixs = hdu.data.field('PIX')\n chans = hdu.data.field('CHANNEL')\n keys = chans * hpx.npix + pixs\n vals = hdu.data.field('VALUE')\n nebin = len(ebins)\n data = np.zeros((nebin, 
hpx.npix))\n data.flat[keys] = vals\n else:\n for c in colnames:\n if c.find(hpx.conv.colstring) == 0:\n cnames.append(c)\n nebin = len(cnames)\n data = np.ndarray((nebin, hpx.npix))\n for i, cname in enumerate(cnames):\n data[i, 0:] = hdu.data.field(cname)\n\n return cls(data, hpx)", "def __init__(self, a0, a1, b0, b1, c0, c1, d0, d1):\n self.a0 = np.array(a0)\n self.b0 = np.array(b0)\n self.c0 = np.array(c0)\n self.d0 = np.array(d0)\n self.a1 = np.array(a1)\n self.b1 = np.array(b1)\n self.c1 = np.array(c1)\n self.d1 = np.array(d1)", "def __init__(self):\n super().__init__()\n self.location = 0.0\n self.scale = 1.0\n self.type = 'Laplace'\n self.distType = 'Continuous'\n self.hasInfiniteBound = True\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def __init__(self, date_debut, date_fin, lieu_depart, lieu_arrivee, distance, voiture, client, cout):\r\n\r\n self._date_debut = date_debut\r\n self._date_fin = date_fin\r\n self._lieu_depart = lieu_depart\r\n self._lieu_arrivee = lieu_arrivee\r\n self._distance = distance\r\n self._voiture = voiture\r\n self._client = client", "def __init__(self, handle=None):\n f90wrap.runtime.FortranDerivedType.__init__(self)\n self._handle = _pychidg.f90wrap_graphics_bc_t_initialise()", "def __init__(self, *args, **kwargs):\n super(BGDistr, self).__init__()\n # np.seterr(all='raise')\n try:\n self.datasource = kwargs['datasource']\n self.data = dispatch(self.datasource)\n if kwargs['datasource'] == 'custom': # this should go into the dispatch\n # maybe it should be scipy.sparse.csr_matrix(args[0])\n self.data = sp.csr_matrix(args[0])\n else:\n self.data.from_file(kwargs['file'], header=True)\n except KeyError:\n print(\"No datasource or file provided.\")\n except TypeError:\n print(\"The type is not recognized. 
Choose graph or MRD during initialization.\")\n except nxexp.NetworkXException:\n print(\"Trying to load from a corrupted file or a wrong datasource\")\n # else:\n # # what happens when no kwargs are passed should be defined.\n # pass\n\n # Properties\n if 'is_square_adj' in kwargs:\n self.__is_square_adj = kwargs['is_square_adj']\n else:\n self.__is_square_adj = True", "def New(derived_class):\n dc = derived_class()\n dc.initialize_implementation()\n return dc", "def __init__(self, xarray_obj):\n super(RasterDataArray, self).__init__(xarray_obj)", "def _from_components(cls, shape, file_managers, wcses, header_tables, inventory):\n assert len(file_managers) == len(wcses) == len(header_tables)\n\n datasets = np.empty(len(file_managers), dtype=object)\n for i, (fm, wcs, headers) in enumerate(zip(file_managers, wcses, header_tables)):\n meta = {\"inventory\": inventory, \"headers\": headers}\n datasets[i] = Dataset(fm._generate_array(), wcs=wcs, meta=meta)\n datasets[i]._file_manager = fm\n datasets = datasets.reshape(shape)\n\n return cls(datasets, inventory)", "def __init__(self, calc_id, particle_name, xp_partition):\n tdc_FMCI_XP_Data_Base.__init__(self)\n # name and calc_id\n self.name = particle_name\n self.calc_id = calc_id\n # setup XP_Data --------------------\n sample_dict = dict(name='regular', n_reduce=1, n_min=1)\n self.xp = tdc_XP_Data(calc_id, particle_name, sample_dict, get_weight=True)\n # interface to timetable -----------\n self.timetable = self.xp.timetable\n # setup properties -----------------\n setup_props = tdc_Setup_Props(calc_id)\n # normalization parameters\n self.W0 = setup_props.get_papam('FMPProps/W0')\n self.L = setup_props.get_papam('/GridProps/L')\n # physical parameters from \"setup_properties.h5\"\n self.PSR_P = setup_props.get_papam('/PulsarGapProps/P')\n self.PSR_B12 = setup_props.get_papam('/PulsarGapProps/B_12')\n self.PSR_Lcm = setup_props.get_papam('/GridProps/L_cm')\n # physical parameters from \"cascade.input\": THETA and CHI\n infile=AT.FileInput()\n infile.ReadFile(tdc_Filenames.get_full_filename(calc_id, 'cascade.input'))\n infile.ChangeGroup('GEOMETRY')\n self.PSR_Theta = infile.get_param('THETA')\n infile.ChangeGroup() \n infile.ChangeGroup('DIMENSIONAL_CONSTANTS::PSR_ConstsInitializer')\n self.PSR_Chi = infile.get_param('CHI')\n infile.ChangeGroup() \n # set xp_partition =================\n self.set_xp_partition(xp_partition)", "def __init__(self, diva2d):\n self.diva2d = diva2d\n\n if os.path.isdir(self.diva2d):\n self.contour = os.path.join(self.diva2d, 'input/coast.cont')\n self.parameter = os.path.join(self.diva2d, 'input/param.par')\n self.data = os.path.join(self.diva2d, 'input/data.dat')\n self.valatxy = os.path.join(self.diva2d, 'input/valatxy.coord')\n self.result = os.path.join(self.diva2d, 'output/ghertonetcdf/results.nc')\n self.mesh = os.path.join(self.diva2d, 'meshgenwork/fort.22')\n self.meshtopo = os.path.join(self.diva2d, 'meshgenwork/fort.23')\n logger.info(\"Creating Diva 2D file names and paths\")\n logger.info(\"Contour file: {0}\".format(self.contour))\n logger.info(\"Parameter file: {0}\".format(self.parameter))\n logger.info(\"Data file: {0}\".format(self.data))\n logger.info(\"Valatxy file: {0}\".format(self.valatxy))\n logger.info(\"Result file: {0}\".format(self.result))\n logger.info(\"Mesh file: {0}\".format(self.mesh))\n logger.info(\"Mesh topo file: {0}\".format(self.meshtopo))\n else:\n logger.error(\"%{0} is not a directory or doesn't exist\".format(self.diva2d))", "def __init__(\n self,\n department_id: 
np.ndarray,\n department_nm: np.ndarray,\n room_bed: np.ndarray,\n move_time: np.ndarray,\n weight: float,\n height: float,\n admin_type: str,\n admin_diag: str,\n admin_date: str,\n birth_date: str,\n race: str,\n sex: str,\n end_date: str,\n end_stay_type: str,\n local_time: np.ndarray,\n medical_hist: np.ndarray,\n surgical_hist: np.ndarray,\n tobacco_hist: str,\n alcohol_hist: str,\n source=EDW_FILES[\"adm_file\"][\"source\"],\n ):\n self.department_id = department_id\n self.department_nm = department_nm\n self.room_bed = room_bed\n self.move_time = move_time\n self.weight = weight\n self.height = height\n self.admin_type = admin_type\n self.admin_diag = admin_diag\n self.admin_date = admin_date\n self.birth_date = birth_date\n self.race = race\n self.sex = sex\n self.end_date = end_date\n self.end_stay_type = end_stay_type\n self.local_time = local_time\n self.medical_hist = medical_hist\n self.surgical_hist = surgical_hist\n self.tobacco_hist = tobacco_hist\n self.alcohol_hist = alcohol_hist\n super().__init__(source)", "def __init__(self):\n super().__init__()\n self.type = 'SparseGridCollocationSampler'\n self.printTag = 'SAMPLER '+self.type.upper()\n self.maxPolyOrder = None #L, the relative maximum polynomial order to use in any dimension\n self.indexSetType = None #TP, TD, or HC; the type of index set to use\n self.polyDict = {} #varName-indexed dict of polynomial types\n self.quadDict = {} #varName-indexed dict of quadrature types\n self.importanceDict = {} #varName-indexed dict of importance weights\n self.maxPolyOrder = None #integer, relative maximum polynomial order to be used in any one dimension\n self.lastOutput = None #pointer to output dataObjects object\n self.ROM = None #pointer to ROM\n self.jobHandler = None #pointer to job handler for parallel runs\n self.doInParallel = True #compute sparse grid in parallel flag, recommended True\n self.dists = {} #Contains the instance of the distribution to be used. keys are the variable names\n self.writeOut = None\n self.indexSet = None\n self.sparseGrid = None\n self.features = None\n self.sparseGridType = None\n self.addAssemblerObject('ROM', InputData.Quantity.one)", "def __init__(self):\n super().__init__()\n self.n = 0.0\n self.p = 0.0\n self.type = 'Binomial'\n self.hasInfiniteBound = True\n self.distType = 'Discrete'\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def __init__(self, *args):\n _gdi_.DCClipper_swiginit(self,_gdi_.new_DCClipper(*args))", "def __init__(self):\n super().__init__()\n self.mu = 0.0\n self.type = 'Poisson'\n self.hasInfiniteBound = True\n self.distType = 'Discrete'\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'" ]
[ "0.5953067", "0.5625412", "0.5618868", "0.5610069", "0.5567573", "0.553519", "0.5409826", "0.5340056", "0.5289628", "0.52513236", "0.52174574", "0.52136445", "0.5164729", "0.5140498", "0.5093613", "0.50666684", "0.5061276", "0.50513726", "0.5045033", "0.5029647", "0.50290495", "0.5028509", "0.5025753", "0.50048083", "0.49846387", "0.4979179", "0.4967918", "0.4956406", "0.49545056", "0.4953326", "0.4952661", "0.49360794", "0.49344823", "0.493215", "0.49313226", "0.49195725", "0.49096882", "0.4906297", "0.48936987", "0.48913848", "0.48829594", "0.4876112", "0.48708546", "0.48662883", "0.4865601", "0.48620844", "0.4856696", "0.48476768", "0.4842522", "0.4839803", "0.48394704", "0.4833548", "0.48290673", "0.48217827", "0.4816237", "0.48151848", "0.4804101", "0.4796271", "0.47957724", "0.47915372", "0.47875437", "0.478641", "0.47857878", "0.47815695", "0.47812665", "0.47810277", "0.4779425", "0.47788465", "0.4778738", "0.47766566", "0.47737733", "0.4765184", "0.47640318", "0.476369", "0.47613826", "0.47610134", "0.47588786", "0.4758809", "0.47555405", "0.47539416", "0.47530735", "0.47524485", "0.47511464", "0.47511438", "0.4748876", "0.47422296", "0.473987", "0.47395638", "0.47394523", "0.47332218", "0.47314087", "0.4730313", "0.47302228", "0.47279826", "0.47271046", "0.47260478", "0.47236773", "0.47163603", "0.47137251", "0.4712285" ]
0.5404663
7
Return all faces where the circumcentre is not infinity.
def get_bounded_faces(self): return [face for face in self.faces if face.is_bounded()]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def faces_only(self):\n return self._faces_only", "def get_filtered_faces(self, faces):\n filtered_faces = []\n for face in faces:\n i, j, k = face\n thresh = 100.0\n if self.get_distance_between_points(i, j) > thresh:\n continue\n elif self.get_distance_between_points(j, k) > thresh:\n continue\n elif self.get_distance_between_points(i, k) > thresh:\n continue\n filtered_faces.append(face)\n return filtered_faces", "def _interiorFaces(self):\n XYids = self._XYFaceIDs\n XZids = self._XZFaceIDs\n YZids = self._YZFaceIDs\n\n interiorIDs = numerix.concatenate((numerix.ravel(XYids[ ..., 1:-1]),\n numerix.ravel(XZids[:, 1:-1,:]),\n numerix.ravel(YZids[1:-1, ...].swapaxes(0, 1))))\n\n from fipy.variables.faceVariable import FaceVariable\n interiorFaces = FaceVariable(mesh=self, value=False)\n interiorFaces[interiorIDs] = True\n return interiorFaces", "def faces(self):\n\n if self._faces is None:\n if self._vertices is None:\n return None\n self.triangulate()\n return self._faces", "def facet_with_holes(self,):\n return self.facet_with_holes_", "def polyhedronIntersection(self, poly):\n return list(filter(lambda x: x is not False,\n (self.faceIntersection(f) for f in poly.faces)))", "def remove_infinite_bars(dgm, issub):\r\n if issub:\r\n inds = dgm[:, 1] != np.inf\r\n return dgm[inds,:]\r\n else:\r\n inds = dgm[:, 1] != -np.inf\r\n return dgm[inds,:]", "def get_false_positives(detections, faces):\n false_positives = []\n for detection in detections:\n is_positive = False\n for face in faces:\n if intersection_ratio(detection, face) > 0.5:\n is_positive = True\n break\n if not is_positive:\n false_positives.append(detection)\n\n return false_positives", "def cube_faces(xmin, xmax, ymin, ymax, zmin, zmax):\n faces = []\n\n x, y = np.mgrid[xmin:xmax:3j, ymin:ymax:3j]\n z = np.ones(y.shape) * zmin\n faces.append((x, y, z))\n\n x, y = np.mgrid[xmin:xmax:3j, ymin:ymax:3j]\n z = np.ones(y.shape) * zmax\n faces.append((x, y, z))\n\n x, z = np.mgrid[xmin:xmax:3j, zmin:zmax:3j]\n y = np.ones(z.shape) * ymin\n faces.append((x, y, z))\n\n x, z = np.mgrid[xmin:xmax:3j, zmin:zmax:3j]\n y = np.ones(z.shape) * ymax\n faces.append((x, y, z))\n\n y, z = np.mgrid[ymin:ymax:3j, zmin:zmax:3j]\n x = np.ones(z.shape) * xmin\n faces.append((x, y, z))\n\n y, z = np.mgrid[ymin:ymax:3j, zmin:zmax:3j]\n x = np.ones(z.shape) * xmax\n faces.append((x, y, z))\n\n return faces", "def get_faces(image):\n return (image.crop(face) for face in image.faces)", "def outside(self,region):\n fs = FeatureSet()\n for f in self:\n if(f.isNotContainedWithin(region)):\n fs.append(f)\n return fs", "def isfinite(self):\n return not self.isAny( (lambda x: not np.isfinite(x)) )", "def detects_outside_grid(self):\r\n ii = self.rec_track['i']\r\n outside = sum(np.isnan(ii))\r\n\r\n return outside", "def faces(self):\r\n return self._faces", "def check_infinite(coord, sides, coordinates):\n return is_border(coord, sides)\\\n and coord not in coordinates\\\n and (\\\n (coord[0]+1, coord[1]) not in coordinates and (coord[0]-1, coord[1]) not in coordinates\\\n or (coord[0], coord[1]+1) not in coordinates and (coord[0], coord[1]-1) not in coordinates)", "def sides(self) -> Iterable[Face]:\n side_faces = []\n for i in range(0, len(self.bodies[0].faces)):\n if i != self._bottom_index and i != self._top_index:\n side_faces.append(self.bodies[0].faces[i])\n return side_faces", "def surface_mask(self):\n return np.vectorize(lambda name: name in self.nvertices.keys())(self.name)", "def select_faces(maxAngle):\n date_1 = datetime.datetime.now()\n 
print(\"Start\")\n\n print(maxAngle, maxAngle*180/pi)\n\n # Get the active object\n obj = bpy.context.active_object\n\n # Add the vertices location in an array\n tabVertices = []\n for vertex in obj.data.vertices:\n tabVertices.append(obj.matrix_world @ vertex.co)\n\n # Find the downward vector in function of the angles of the mesh\n matrix_new = obj.matrix_world.to_3x3().inverted()\n no_world = matrix_new @ mathutils.Vector((0,0,-1))\n no_world.normalize() \n print(no_world)\n vecDir = no_world\n\n\n # Arrays for the function C parameters\n tabPoly = []\n tabNormalX = []\n tabNormalY = []\n tabNormalZ = []\n tabFaces = []\n\n tabPoint1X = []\n tabPoint1Y = []\n tabPoint2X = []\n tabPoint2Y = []\n tabPoint3X = []\n tabPoint3Y = []\n tabPoint1Z = []\n tabPoint2Z = []\n tabPoint3Z = []\n\n # Fill the arrays\n for poly in obj.data.polygons:\n tabPoly.append(poly.index)\n tabNormalX.append(poly.normal[0])\n tabNormalY.append(poly.normal[1])\n tabNormalZ.append(poly.normal[2])\n tabFaces.append(0)\n \n tabPoint1X.append(tabVertices[poly.vertices[0]].x)\n tabPoint1Y.append(tabVertices[poly.vertices[0]].y)\n tabPoint2X.append(tabVertices[poly.vertices[1]].x)\n tabPoint2Y.append(tabVertices[poly.vertices[1]].y)\n tabPoint3X.append(tabVertices[poly.vertices[2]].x)\n tabPoint3Y.append(tabVertices[poly.vertices[2]].y)\n tabPoint1Z.append(tabVertices[poly.vertices[0]].z)\n tabPoint2Z.append(tabVertices[poly.vertices[1]].z)\n tabPoint3Z.append(tabVertices[poly.vertices[2]].z)\n\n print(len(tabPoly)) \n\n print(os.path.dirname(__file__))\n # Get the filepath of the dll file\n functionC = ctypes.CDLL(os.path.dirname(__file__) + \"\\\\function.dll\")\n\n # Create array for C function\n seq = ctypes.c_int * len(tabPoly)\n arrIndex = seq(*tabPoly)\n\n seq = ctypes.c_float * len(tabNormalX)\n arrNormalX = seq(*tabNormalX)\n seq = ctypes.c_float * len(tabNormalY)\n arrNormalY = seq(*tabNormalY)\n seq = ctypes.c_float * len(tabNormalZ)\n arrNormalZ = seq(*tabNormalZ)\n\n seq = ctypes.c_int * len(tabFaces)\n arrFaces = seq(*tabFaces)\n\n seq = ctypes.c_float * len(tabPoint1X)\n arrPoint1X = seq(*tabPoint1X)\n seq = ctypes.c_float * len(tabPoint1Y)\n arrPoint1Y = seq(*tabPoint1Y)\n seq = ctypes.c_float * len(tabPoint2X)\n arrPoint2X = seq(*tabPoint2X)\n seq = ctypes.c_float * len(tabPoint2Y)\n arrPoint2Y = seq(*tabPoint2Y)\n seq = ctypes.c_float * len(tabPoint3X)\n arrPoint3X = seq(*tabPoint3X)\n seq = ctypes.c_float * len(tabPoint3Y)\n arrPoint3Y = seq(*tabPoint3Y)\n seq = ctypes.c_float * len(tabPoint1Z)\n arrPoint1Z = seq(*tabPoint1Z)\n seq = ctypes.c_float * len(tabPoint2Z)\n arrPoint2Z = seq(*tabPoint2Z)\n seq = ctypes.c_float * len(tabPoint3Z)\n arrPoint3Z = seq(*tabPoint3Z)\n\n date_3 = datetime.datetime.now()\n \n # Call the C function\n functionC.select_faces(arrIndex,len(tabPoly),arrNormalX,arrNormalY,arrNormalZ,ctypes.c_float(maxAngle),ctypes.c_float(vecDir.x),ctypes.c_float(vecDir.y),ctypes.c_float(vecDir.z),arrFaces,arrPoint1X,arrPoint1Y,arrPoint2X,arrPoint2Y,arrPoint3X,arrPoint3Y,arrPoint1Z,arrPoint2Z,arrPoint3Z)\n\n date_4 = datetime.datetime.now()\n\n # Switch in edit mode\n bpy.ops.object.mode_set(mode = 'EDIT')\n # Deselect all the faces\n bpy.ops.mesh.select_all(action = 'DESELECT')\n bpy.ops.mesh.select_mode(type=\"FACE\")\n # Switch in object mode\n bpy.ops.object.mode_set(mode = 'OBJECT')\n\n # Select all the faces needed support\n for i in range(len(arrFaces)):\n if arrFaces[i] == 1:\n obj.data.polygons[tabPoly[i]].select = True\n print(len(arrFaces))\n \n # Switch in edit mode\n 
bpy.ops.object.mode_set(mode = 'EDIT')\n print(\"End\")\n\n date_2 = datetime.datetime.now()\n time_delta = (date_2 - date_1)\n total_seconds = time_delta.total_seconds()\n print(total_seconds)\n \n time_delta = (date_4 - date_3)\n total_seconds = time_delta.total_seconds()\n print(\"C time : \", total_seconds)", "def notOnImageEdge(self, tolerance=1):\n fs = FeatureSet()\n for f in self:\n if(f.notOnImageEdge(tolerance)):\n fs.append(f)\n return fs", "def get_outer_boundary_of_voronoi(self):\n edge = [edge for edge in self.edges if not edge.nxt][0]\n # next(obj for obj in objs if obj.val==5)\n first_vertex = edge.origin\n outer_boundary = []\n while (not edge.get_destination() == first_vertex):\n if(edge.get_destination().is_infinity()):\n edge = edge.twin.nxt\n else:\n outer_boundary.append(edge)\n edge = edge.nxt\n outer_boundary.append(edge)\n return outer_boundary", "def solved(self):\n return all(cell == 1 for row in self.faces for cell in row) or all(cell == 0 for row in self.faces for cell in row)", "def find_disconnected_voxels(im, conn=None):\n if im.ndim == 2:\n if conn == 4:\n strel = disk(1)\n elif conn in [None, 8]:\n strel = square(3)\n elif im.ndim == 3:\n if conn == 6:\n strel = ball(1)\n elif conn in [None, 26]:\n strel = cube(3)\n labels, N = spim.label(input=im, structure=strel)\n holes = clear_border(labels=labels) > 0\n return holes", "def cut_region(mesh, box):\n left, right, down, up = box\n\n selection = (\n (mesh.x2 >= left)\n & (mesh.x2 <= right)\n & (mesh.y2 >= down)\n & (mesh.y2 <= up)\n )\n\n elem_selection = selection[mesh.elem]\n\n no_nan_triangles = np.all(elem_selection, axis=1)\n\n elem_no_nan = mesh.elem[no_nan_triangles]\n\n return elem_no_nan, no_nan_triangles", "def countInfinites(mat):\n isFinite = np.all(np.isfinite(mat))\n \n if not isFinite:\n count = 0\n indices = []\n for i in range(0,len(mat)):\n if mat[i] in [-np.inf,np.inf]:\n count+=1\n indices.append(i)", "def faces(self):\n return map(Face, self._top_exp.faces())", "def clearsky_limits(measured, clearsky, csi_max=1.1):\n csi = pvlib.irradiance.clearsky_index(\n measured,\n clearsky,\n max_clearsky_index=np.Inf\n )\n return quality.util.check_limits(\n csi, upper_bound=csi_max, inclusive_upper=True\n )", "def partially(self) -> float:\n return self._coreEstimation[DetailedMaskType.PartlyCoveredFace]", "def faceDivz(self):\n if(self.dim < 3):\n return None\n if getattr(self, '_faceDivz', None) is None:\n # The number of cell centers in each direction\n n = self.vnC\n # Compute faceDivergence operator on faces\n D3 = kron3(ddx(n[2]), speye(n[1]), speye(n[0]))\n # Compute areas of cell faces & volumes\n S = self.r(self.area, 'F', 'Fz', 'V')\n V = self.vol\n self._faceDivz = sdiag(1/V)*D3*sdiag(S)\n return self._faceDivz", "def has_none_planar_faces(self) -> bool:\n return not all(\n is_planar_face(face) for face in self.faces_as_vertices()\n )", "def get_kequiv_by_face_quad(self, conj_faces):\n ADJs=np.array([self.mb.get_adjacencies(face, 3) for face in self.all_faces])\n\n centroids=np.array([self.mtu.get_average_position([v]) for v in self.all_volumes])\n\n ADJsv=np.array([self.mb.get_adjacencies(face, 0) for face in self.all_faces])\n\n ks=self.mb.tag_get_data(self.perm_tag, self.all_volumes)\n #vol_to_pos=dict(zip(M1.all_volumes,range(len(M1.all_volumes))))\n vol_to_pos=dict(zip(self.all_volumes,range(len(self.all_volumes))))\n cont=0\n K_eq=[]\n for f in self.all_faces:\n adjs=ADJs[cont]\n adjsv=ADJsv[cont]\n cont+=1\n if len(adjs)==2:\n v1=adjs[0]\n v2=adjs[1]\n k1 = 
ks[vol_to_pos[v1]].reshape([3, 3])\n k2 = ks[vol_to_pos[v2]].reshape([3, 3])\n centroid1 = centroids[vol_to_pos[v1]]\n centroid2 = centroids[vol_to_pos[v2]]\n direction = centroid2 - centroid1\n norm=np.linalg.norm(direction)\n uni = np.absolute(direction/norm)\n k1 = np.dot(np.dot(k1,uni), uni)\n k2 = np.dot(np.dot(k2,uni), uni)\n k_harm = (2*k1*k2)/(k1+k2)\n\n vertex_cent=np.array([self.mb.get_coords([np.uint64(a)]) for a in adjsv])\n dx=max(vertex_cent[:,0])-min(vertex_cent[:,0])\n dy=max(vertex_cent[:,1])-min(vertex_cent[:,1])\n dz=max(vertex_cent[:,2])-min(vertex_cent[:,2])\n if dx<0.001:\n dx=1\n if dy<0.001:\n dy=1\n if dz<0.001:\n dz=1\n area=dx*dy*dz\n #area = self.mb.tag_get_data(self.area_tag, face, flat=True)[0]\n #s_gr = self.gama*keq*(centroid2[2]-centroid1[2])\n keq = k_harm*area/(self.mi*norm)\n\n K_eq.append(keq)\n else:\n K_eq.append(0.0)\n self.mb.tag_set_data(self.k_eq_tag, self.all_faces, K_eq)", "def convex_hull(self):\n if self._faces is None:\n if self._vertices is None:\n return None\n self.triangulate()\n return self._convex_hull", "def rook_neighbors_face(self, face):\n edges = self.cw_face_edges(face)\n return list(set([ self.left_region[edge] for edge in edges]))", "def get_no_vertices(self):\r\n return len(self.__neighbours.keys())", "def GetInteriorFacesTet(self):\n\n if not isinstance(self.all_faces,np.ndarray):\n self.GetFacesTet()\n if not isinstance(self.faces,np.ndarray):\n self.GetBoundaryFacesTet()\n\n face_flags = in2d(self.all_faces.astype(self.faces.dtype),self.faces,consider_sort=True)\n face_flags[face_flags==True] = 1\n face_flags[face_flags==False] = 0\n interior_faces = self.all_faces[face_flags==False,:]\n\n return interior_faces, face_flags", "def faceIntersection(self, face):\n # Test wether both ends of the edge are in the same half space\n # (relative to <face>'s plane).\n normal = face.normalVect()\n v0 = vector(face.vertices[0])\n vp = vector(self.pvt)\n vn = vector(self.nvt)\n p = normal.dotProduct(vp - v0) * normal.dotProduct(vn - v0)\n if p > 0:\n return False\n elif abs(p) <= COMPARISON_EPSILON or abs(normal.dotProduct(vp - vn) / (normal.norm() * (vp - vn).norm())) <= COMPARISON_EPSILON:\n # print('ah')\n return False\n else:\n interVect = vn + (normal.dotProduct(v0 - vn) /\n normal.dotProduct(vp - vn)) * (vp - vn)\n lastCross = ((vector(face.vertices[-1]) - interVect) *\n (vector(face.vertices[0]) - interVect))\n for i in range(len(face.vertices)):\n cross = ((vector(face.vertices[i]) - interVect) *\n (vector(face.vertices[(i + 1) % len(face.vertices)]) -\n interVect))\n p = cross.dotProduct(lastCross)\n if p < 0:\n return False\n elif p == 0 and cross.norm() != 0:\n if cross.norm() > COMPARISON_EPSILON:\n warnings.warn(\"Cross product's norm is very low\")\n lastCross = cross\n return interVect.coords()", "def volume_mask(self):\n return np.vectorize(lambda name: name not in self.nvertices.keys())(self.name)", "def integral_points_not_interior_to_facets(self):\n n = 1 + self.space_dimension() - self.affine_dimension()\n return tuple(p[0] for p in self._integral_points_saturating() if len(p[1])!=n)", "def _face_center(mesh, face):\n center = [0, 0, 0]\n for vert in face.vertices:\n center = _list_plus(vert, center)\n new_list = [x / len(face.vertices) for x in center]\n return new_list", "def findpoints(self, pl, onetoone=False):\r\n\r\n ans = []\r\n if onetoone:\r\n fl = self.faces[:]\r\n for p in pl:\r\n found = False\r\n for f in fl:\r\n if f.external:\r\n continue\r\n if f.is_inside(p):\r\n fl.remove(f)\r\n found = True\r\n 
ans.append(f)\r\n break\r\n if not found:\r\n ans.append(None)\r\n\r\n else:\r\n for p in pl:\r\n found = False\r\n for f in self.faces:\r\n if f.external:\r\n continue\r\n if f.is_inside(p):\r\n found = True\r\n ans.append(f)\r\n break\r\n if not found:\r\n ans.append(None)\r\n\r\n return ans", "def finite(x):\n return numpy.array(x)[numpy.isfinite(x)]", "def trim_floating_solid(im):\n holes = find_disconnected_voxels(~im)\n im[holes] = True\n return im", "def lidar_cloud_filtering(cloud: np.ndarray, fov: float) -> np.ndarray:\n min_fov = -(fov/2)*(3.14/180)\n max_fov = (fov/2)*(3.14/180)\n mask = ((cloud[:,2] > -2.00) \n & (np.arctan2(cloud[:,1], cloud[:,0]) > min_fov) \n & (np.arctan2(cloud[:,1], cloud[:,0]) < max_fov))\n cloud = cloud[mask]\n return cloud", "def test_eigenvalues_of_too_few_points_results_in_0():\n a = np.array([5])\n pc = create_point_cloud(a, a, a)\n\n compute_features(pc, [[0]], pc, [\"eigenv_1\", \"eigenv_2\", \"eigenv_3\"], InfiniteCylinder(5))\n\n eigen_val_123 = np.array([pc[keys.point]['eigenv_{}'.format(i)]['data'] for i in [1, 2, 3]])\n assert not np.any(np.isnan(eigen_val_123))\n assert not np.any(np.isinf(eigen_val_123))", "def arg_inf(multilayer):\n out = []\n for ix, x in enumerate(multilayer):\n if numpy.isinf(x.thickness):\n out.append(ix)\n return out", "def get_faces():\n detected_faces = request()\n\n if not detected_faces:\n raise FaceNotDetectedError()\n return detected_faces", "def find_isolated_vertices(self):\n graph = self.__graph_dict\n isolated = []\n for vertex in graph:\n # print(isolated,vertex)\n if not graph[vertex]:\n isolated += [vertex]\n return isolated", "def get_contour(phi):\n eps = 1\n A = (phi > -eps) * 1\n B = (phi < eps) * 1\n D = (A - B).astype(np.int32)\n D = (D == 0) * 1\n Y, X = np.nonzero(D)\n return np.array([X, Y]).transpose()", "def get_contour(phi):\n eps = 1\n A = (phi > -eps) * 1\n B = (phi < eps) * 1\n D = (A - B).astype(np.int32)\n D = (D == 0) * 1\n Y, X = np.nonzero(D)\n return np.array([X, Y]).transpose()", "def _free_indicies(self):\n return np.logical_not(self._fixed_indicies)", "def findFaces(self):\n\t\trects = self.detectAll()\n\t\tif len(rects)==0:\n\t\t\trects = []\n\t\telse:\n\t\t\trects[:, 2:] += rects[:, :2]\n\t\tself.analyzeFrame(rects)", "def get_face_barycenters(self, idx=-1):\n if idx >= len(self.faces):\n raise IndexError\n if idx >= 0:\n v = np.vectorize(lambda x: self.vertices[x], signature='()->(n)')(self.faces[idx])\n return np.mean(v, axis=0)\n else:\n v = self.vertices\n f = self.faces\n return v[f.flatten()].reshape((-1, 3, 3)).mean(axis=1)", "def incident(self):\n incidence_matrix = self.polyhedron().incidence_matrix()\n for V in self.polyhedron().Vrep_generator():\n if incidence_matrix[V.index(), self.index()] == 1:\n yield V", "def incident(self):\n incidence_matrix = self.polyhedron().incidence_matrix()\n for V in self.polyhedron().Vrep_generator():\n if incidence_matrix[V.index(), self.index()] == 1:\n yield V", "def GetInteriorEdgesQuad(self):\n\n if not isinstance(self.all_edges,np.ndarray):\n self.GetEdgesQuad()\n if not isinstance(self.edges,np.ndarray):\n self.GetBoundaryEdgesQuad()\n\n sorted_all_edges = np.sort(self.all_edges,axis=1)\n sorted_boundary_edges = np.sort(self.edges,axis=1)\n\n x = []\n for i in range(self.edges.shape[0]):\n current_sorted_boundary_edge = np.tile(sorted_boundary_edges[i,:],\n self.all_edges.shape[0]).reshape(self.all_edges.shape[0],self.all_edges.shape[1])\n interior_edges = np.linalg.norm(current_sorted_boundary_edge - sorted_all_edges,axis=1)\n 
pos_interior_edges = np.where(interior_edges==0)[0]\n if pos_interior_edges.shape[0] != 0:\n x.append(pos_interior_edges)\n\n edge_aranger = np.arange(self.all_edges.shape[0])\n edge_aranger = np.setdiff1d(edge_aranger,np.array(x)[:,0])\n interior_edges = self.all_edges[edge_aranger,:]\n\n # GET FLAGS FOR BOUNDRAY AND INTERIOR\n edge_flags = np.ones(self.all_edges.shape[0],dtype=np.int64)\n edge_flags[edge_aranger] = 0\n\n self.interior_edges = interior_edges\n return interior_edges, edge_flags", "def find_faces(self, img: Array3D, det_prob_threshold: float = None) -> List[BoundingBoxDTO]:\n raise NotImplementedError", "def test_face_in_face(self):\n w = mt.createCube(marker=1, boundaryMarker=1)\n b = w.boundary(2)\n\n pad = mt.createFacet(mt.createCircle(radius=0.2, segments=12,\n isHole=True))\n b2 = pad.boundary(0)\n\n # rotate to match target norm and pos\n rot = pg.core.getRotation(b2.norm(), b.norm())\n pad.transform(rot)\n pad.translate(b.center())\n\n # create a boundary with new marker match the hole\n w.copyBoundary(b2)\n\n w.createBoundary(w.nodes([w.createNode(n.pos()).id() for n in b2.nodes()]),\n marker=2)\n\n #print(w.boundaryMarkers())\n\n mesh = mt.createMesh(w)\n\n #pg.show(mesh)\n # w.exportPLC('pad.poly')\n # mesh.exportBoundaryVTU('b.vtu')\n np.testing.assert_array_equal(pg.unique(pg.sort(mesh.boundaryMarkers())),\n [0, 1, 2])\n\n # print(mesh)\n # mesh.exportBoundaryVTU('b.vtu')\n pg.show(mesh)", "def faces(self):\n res = []\n if(self.isFacesLoaded() == False):\n return res\n for face in self.getCurrentCacheData()['faces']:\n face = Face().setJSONData(face)\n if(face.confidence > self.faceDetector.confThreshold):\n res.append(face)\n return res", "def no_non_adjacent_vertices(self):\n clauses = []\n for v in range(0,self.graph.num_vertices):\n non_neighbours = sorted(list(set(range(0,self.graph.num_vertices))\n - set([v])\n - set(self.graph.edges[v])))\n for nv in non_neighbours:\n for position in range(0,self.graph.num_vertices-1):\n clause = [ ClauseVariable(True,v,position),\n ClauseVariable(True,nv,position+1)]\n clauses.append(clause)\n return clauses", "def getNakedFaceIDs(mesh):\n \n nakedFaces = []\n \n # Get naked vertices\n nPts = list( mesh.GetNakedEdgePointStatus())\n nIDs = [i for i,v in enumerate(nPts) if v == True]\n \n for i in range(mesh.Faces.Count):\n \n # Get face vertices\n f = mesh.Faces.Item[i]\n if f.IsTriangle:\n vts = (f.A,f.B,f.C)\n else:\n vts = (f.A,f.B,f.C,f.D)\n \n # Check if they are naked\n naked = False\n for vt in vts:\n if vt in nIDs:\n naked = True\n \n if naked:\n nakedFaces.append(i)\n \n return nakedFaces", "def faces(self):\r\n \r\n faceset = set()\r\n for faset in self.SCFaset:\r\n for face in sub_lists(faset):\r\n faceset.add(tuple(face))\r\n return list(faceset)", "def convex(self):\n x, y = self.center\n angles = []\n l = len(self.points)\n for i in range(l - 1):\n A = self.points[(i + l - 1) % l]\n B = self.points[i % l]\n C = self.points[(i + 1) % l]\n u = Vector.createFromTwoPoints(A, B)\n v = Vector.createFromTwoPoints(C, B)\n angle = v ^ u\n if angle > pi:\n return True\n return False", "def Delaunay_circumcenters(triangles):\n na = numpy.newaxis\n\n sign = numpy.sign(linalg.det(triangles))\n\n A = triangles[:, 1, :] - triangles[:, 0, :]\n B = triangles[:, 2, :] - triangles[:, 0, :]\n C = numpy.cross(A, B)\n\n return sign[:, na] * (C / linalg.norm(C, axis=1)[:, na])", "def equatorial_zone_vertices(vertices, pole, width=5):\n return [i\n for i, v in enumerate(vertices)\n if np.abs(np.dot(v, pole)) < np.abs(np.sin(np.pi 
* width / 180))]", "def _box_faces(image):\n for face in image.faces:\n _box_face(image, face)\n return image", "def is_infinite(self):\r\n return self._real.is_infinite() or self._imag.is_infinite()", "def _get_vp_facearea(self, geom):\n if geom.vp is None:\n geom.voronoi(self.pbc, self.ratio)\n f = geom.vp.vp_faces()\n # TODO: Remove small VP faces (may be check pyvoro?)\n # if rm_small:\n # fa = self.vp.vp_face_area(f)\n # f = self.vp.remove_small_faces(f, fa, eps)\n fa = geom.vp.vp_face_area(f)\n # here fa is the list of dictionaries, we make it a 2d numpy array\n # with masked values\n # WARNING: O(nat^2 * nsteps) memory consumption!\n nat = len(fa)\n fa_np = np.zeros((nat, nat), dtype=np.float)\n for iat, ngbr in enumerate(fa):\n for jat, area in ngbr.iteritems():\n fa_np[iat, jat] = area\n fa_np = np.ma.masked_values(fa_np, 0.)\n return fa_np", "def faceDiv(self):\n if getattr(self, '_faceDiv', None) is None:\n n = self.vnC\n # Compute faceDivergence operator on faces\n if(self.dim == 1):\n D = ddx(n[0])\n elif(self.dim == 2):\n D1 = sp.kron(speye(n[1]), ddx(n[0]))\n D2 = sp.kron(ddx(n[1]), speye(n[0]))\n D = sp.hstack((D1, D2), format=\"csr\")\n elif(self.dim == 3):\n D1 = kron3(speye(n[2]), speye(n[1]), ddx(n[0]))\n D2 = kron3(speye(n[2]), ddx(n[1]), speye(n[0]))\n D3 = kron3(ddx(n[2]), speye(n[1]), speye(n[0]))\n D = sp.hstack((D1, D2, D3), format=\"csr\")\n # Compute areas of cell faces & volumes\n S = self.area\n V = self.vol\n self._faceDiv = sdiag(1/V)*D*sdiag(S)\n return self._faceDiv", "def implicit_surface(self, F, y):\n y = y[:, :, None].expand(-1, -1, self.n_primitives, -1)\n y_latent, ldj = self.invertible_network.inverse(F, y)\n norm = torch.sqrt((y_latent**2).sum(-1))\n\n # <0 denotes internal points\n # >0 denotes external points\n # 0 is the boundary hence our primitive\n return norm - self.radius, ldj", "def graphics_nfaces(self):\n nfaces_ = _pychidg.f90wrap_graphics_nfaces(self=self._handle)\n return nfaces_", "def _exteriorFaces(self):\n XYids = self._XYFaceIDs\n XZids = self._XZFaceIDs\n YZids = self._YZFaceIDs\n\n exteriorIDs = numerix.concatenate((numerix.ravel(XYids[..., 0].swapaxes(0, 1)),\n numerix.ravel(XYids[..., -1].swapaxes(0, 1)),\n numerix.ravel(XZids[:, 0,:]),\n numerix.ravel(XZids[:, -1,:]),\n numerix.ravel(YZids[ 0, ...]),\n numerix.ravel(YZids[-1, ...])))\n\n from fipy.variables.faceVariable import FaceVariable\n exteriorFaces = FaceVariable(mesh=self, value=False)\n exteriorFaces[exteriorIDs] = True\n return exteriorFaces", "def delete_hidden_face(_face):\n bottom_edge = min(\n filter_horizontal_edges(_face.edges, _face.normal),\n key=lambda e: calc_edge_median(e).z,\n )\n hidden = min(\n [f for f in bottom_edge.link_faces], key=lambda f: f.calc_center_median().z\n )\n bmesh.ops.delete(bm, geom=[hidden], context=\"FACES\")", "def cw_face_edges(self,face):\n\n l0 = self.region_link[face]\n if face == self.left_region[l0]:\n l0 = (l0[1], l0[0])\n l = l0\n\n traversing = True\n edges = []\n while traversing:\n edges.append(l)\n r = self.right_region[l]\n if r == face:\n l = self.succ_right[l]\n else:\n l = self.succ_left[l]\n if l == l0:\n traversing = False\n return edges", "def get_none_zero_region(im, margin):\n input_shape = im.shape\n if(type(margin) is int ):\n margin = [margin]*len(input_shape)\n assert(len(input_shape) == len(margin))\n indxes = np.nonzero(im)\n idx_min = []\n idx_max = []\n for i in range(len(input_shape)):\n idx_min.append(indxes[i].min())\n idx_max.append(indxes[i].max())\n\n for i in range(len(input_shape)):\n 
idx_min[i] = max(idx_min[i] - margin[i], 0)\n idx_max[i] = min(idx_max[i] + margin[i], input_shape[i] - 1)\n return idx_min, idx_max", "def check_nfaces(sections):\n return _check_nentries(sections, \"NFACES\", \"FACES\")", "def boundary(self):\n answer = self.zero()\n for k, v in self.items():\n for idx, cube in enumerate(k):\n acc_dim = sum((cube_l.dimension for cube_l in k[:idx]))\n for i in range(cube.dimension):\n for epsilon in (0, 1):\n new_cube = cube.face(i, epsilon)\n new_k = k[:idx] + (new_cube,) + k[idx + 1:]\n sign_exp = (acc_dim + i + epsilon) % 2\n answer += answer.create({new_k: v * (-1)**sign_exp})\n return answer", "def RemovedFaces(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_RemoveInternalWires_RemovedFaces(self, *args)", "def is_finite(self):\r\n return self._real.is_finite() and self._imag.is_finite()", "def circumcenter(self) -> Point:\n e1, e2, e3 = self.edges\n bisector1 = e1._line.perpendicular(e1.midpoint, plane=self._plane)\n bisector2 = e2._line.perpendicular(e2.midpoint, plane=self._plane)\n return bisector1.meet(bisector2)", "def get_faces(self):\n for i, j in combinations(self.gens, 2):\n c0 = self.triangle_verts[self.vertex_at_mirrors(i, j)]\n f0 = []\n m = self.cox_mat[i][j]\n H = (i, j)\n type = 0\n if self.active[i] and self.active[j]:\n type = 1\n for k in range(m):\n f0.append(self.G.move(self.vtable, 0, (i, j) * k))\n f0.append(self.G.move(self.vtable, 0, (i, j) * k + (i,)))\n elif self.active[i] and m > 2:\n for k in range(m):\n f0.append(self.G.move(self.vtable, 0, (j, i) * k))\n elif self.active[j] and m > 2:\n for k in range(m):\n f0.append(self.G.move(self.vtable, 0, (i, j) * k))\n else:\n continue\n\n reps = set(self.word_generator(parabolic=H))\n reps = self.G.sort_words(reps)\n flist = []\n for word in reps:\n f = tuple(self.G.move(self.vtable, v, word) for v in f0)\n if None not in f and not helpers.check_duplicate_face(f, flist):\n center = self.transform(word, c0)\n coords = [self.vertices_coords[k] for k in f]\n face = DihedralFace(word, f, center, coords, type)\n flist.append(face)\n\n self.face_indices[(i, j)] = flist\n\n self.num_faces = sum(len(L) for L in self.face_indices.values())", "def get_no_cyclic(mesh, elem_no_nan):\n d = mesh.x2[elem_no_nan].max(axis=1) - mesh.x2[elem_no_nan].min(axis=1)\n no_cyclic_elem = np.argwhere(d < 100)\n return no_cyclic_elem.ravel()", "def vertices(self):\n\n if self._faces is None:\n if self._vertices is None:\n return None\n self.triangulate()\n return self._vertices", "def pixelise_region(coordinates, shapefile):\n return [coordinate for coordinate in coordinates if\n (np.sum(shapefile['geometry'].apply(lambda x: Point(coordinate[1], coordinate[0]).within(x))) != 0) |\n (np.sum(shapefile['geometry'].apply(lambda x: Point(coordinate[3], coordinate[0]).within(x))) != 0) |\n (np.sum(shapefile['geometry'].apply(lambda x: Point(coordinate[1], coordinate[2]).within(x))) != 0) |\n (np.sum(shapefile['geometry'].apply(lambda x: Point(coordinate[3], coordinate[2]).within(x))) != 0)]", "def has_faces(self):\n return len(self._faces) > 0", "def is_polygon_convex(polygon):\n c = center_of_mass_polygon(polygon)\n for i in range(-1, len(polygon) - 1):\n p0 = polygon[i]\n p1 = polygon[i - 1]\n p2 = polygon[i + 1]\n v0 = subtract_vectors(c, p0)\n v1 = subtract_vectors(p1, p0)\n v2 = subtract_vectors(p2, p0)\n a1 = angle_smallest_vectors(v1, v0)\n a2 = angle_smallest_vectors(v0, v2)\n if a1 + a2 > pi:\n return False\n return True", "def box_faces(images):\n return (_box_faces(image) for image in images)", "def 
nondominated_edges(self):\n return self._nondominated_edges", "def Clear(self, *args):\n return _BRepAlgo.BRepAlgo_FaceRestrictor_Clear(self, *args)", "def inactive_surfaces(self):\n return [surface for surface in self._surfaces if not surface.active]", "def polygon_contains_holes(self, outer_poly):\n contain_list = []\n for hole_polygon in self.hole_list:\n if all(self.polygon_contains(outer_poly, hole_polygon)):\n contain_list.append(hole_polygon)\n return contain_list", "def near_clipping_face(self):\n pln = self.tripod.plane\n l, r, b, t, n, f = self.body.dim\n face = gt.Plin((l, b, -n), (r, b, -n), (r, t, -n), (l, t, -n))\n return pln.TM * face", "def GetInteriorFacesHex(self):\n\n if not isinstance(self.all_faces,np.ndarray):\n self.GetFacesHex()\n if not isinstance(self.faces,np.ndarray):\n self.GetBoundaryFacesHex()\n\n face_flags = in2d(self.all_faces.astype(self.faces.dtype),self.faces,consider_sort=True)\n face_flags[face_flags==True] = 1\n face_flags[face_flags==False] = 0\n interior_faces = self.all_faces[face_flags==False,:]\n\n return interior_faces, face_flags", "def hasNoDoubleVertices(self):\n assert all(self.vertices.count(v) == 1 for v in self.vertices)\n return (all(all(v1 == v2 or v1.dist(v2) > COMPARISON_EPSILON for v2 in self.vertices)\n for v1 in self.vertices) and\n all(self.vertices.count(v) == 1 for v in self.vertices))", "def get_faceboxes(self,img,threshold):\n faces = self.mark_detector.extract_cnn_facebox(img,threshold)\n \n\n\n faceboxes = []\n for face in faces:\n start = (face[0],face[1])\n end = (face[2],face[3])\n box = Box.setStartEnd(start,end)\n\n faceboxes.append(box)\n \n return faceboxes", "def iter_unsolved_cells(self):\n\t\treturn (\n\t\t\tcell for cell in\n\t\t\tself._cells\n\t\t\tif not cell.value()\n\t\t)", "def find_faces(self, selector: _face_selector_types) -> Sequence[Face]:\n selector_faces = _flatten_face_selectors(selector)\n result = []\n for body in self.bodies:\n for face in _find_coincident_faces_on_body(body.brep, selector_faces):\n result.append(body.faces[_face_index(face)])\n return result", "def _getFaces(self, it_mesh_poly):\n\n faces = []\n it_mesh_poly.reset()\n while not it_mesh_poly.isDone():\n faces += it_mesh_poly.getVertices()\n it_mesh_poly.next(1)\n\n return faces", "def _get_visible_components(self, bounds):\n if bounds is None:\n return [c for c in self.components if c.visible]\n\n return [c for c in self.components\n if intersect_bounds(c.rect, bounds) != empty_rectangle]", "def filter_directions(self):\n dot_products = np.matmul(self.box, self.direction)\n return self.box[[True if dp > 0 else False for dp in dot_products]]", "def voronoi_finite_polygons_2d(self, vor, radius=None):\n # print(\"Running voronoi_finite_polygons_2d\")\n if vor.points.shape[1] != 2:\n raise ValueError(\"Requires 2D input\")\n new_regions = []\n new_vertices = vor.vertices.tolist()\n new_ridge_vertices = []\n vor_ridge_vertices = vor.ridge_vertices\n for p in vor_ridge_vertices:\n if all(i >= 0 for i in p):\n new_ridge_vertices.append(p)\n\n center = vor.points.mean(axis=0)\n if radius is None:\n radius = vor.points.ptp().max()\n\n all_ridges = {}\n for (p1, p2), (v1, v2) in zip(vor.ridge_points,\n vor.ridge_vertices):\n all_ridges.setdefault(\n p1, []).append((p2, v1, v2))\n all_ridges.setdefault(\n p2, []).append((p1, v1, v2))\n\n # Reconstruct infinite regions\n for p1, region in enumerate(vor.point_region): # p1 is a counter (0,1, etc), region is the region \"name (label)\" for the p1th point\n vertices = vor.regions[region] # 
Returns the vertices that corresponds to the \"region_th\" region. Region starts at 1\n if all(v >= 0 for v in vertices):\n # finite region\n new_regions.append(vertices)\n continue\n # reconstruct a non-finite region\n ridges = all_ridges[p1] # Get a list of all ridges surrounding that point [(p2, v1, v2)]\n new_region = [v for v in vertices if v >= 0] # new_region contains all the finite vertices from std vor\n for p2, v1, v2 in ridges:\n if v2 < 0: # Why is this here? Just to flip order?\n v1, v2 = v2, v1\n if v1 >= 0: # v1 is always the one that could be at infinity\n # finite ridge: already in the region\n continue\n # Compute the missing endpoint of an\n # infinite ridge\n t = vor.points[p2] - \\\n vor.points[p1] # tangent\n t /= np.linalg.norm(t) # Normalize\n n = np.array([-t[1], t[0]]) # normal\n midpoint = vor.points[[p1, p2]]. \\\n mean(axis=0)\n direction = np.sign(\n np.dot(midpoint - center, n)) * n\n far_point = vor.vertices[v2] + \\\n direction * radius\n new_region.append(len(new_vertices))\n new_vertices.append(far_point.tolist())\n new_ridge_vertices.append([v2, len(new_vertices)-1])\n\n # Sort region counterclockwise.\n vs = np.asarray([new_vertices[v]\n for v in new_region])\n c = vs.mean(axis=0)\n angles = np.arctan2(\n vs[:, 1] - c[1], vs[:, 0] - c[0])\n new_region = np.array(new_region)[\n np.argsort(angles)]\n new_regions.append(new_region.tolist())\n return new_regions, np.asarray(new_vertices), new_ridge_vertices", "def test_cube_attribute_no_seapoints(self):\n result = _make_mask_cube(\n self.mask, self.coords, [self.lower, self.upper], self.units\n )\n self.assertEqual(\n result.attributes[\"topographic_zones_include_seapoints\"], \"False\"\n )" ]
[ "0.6049252", "0.6032412", "0.5654293", "0.5523289", "0.55073357", "0.5405898", "0.5375053", "0.53417516", "0.52805036", "0.52619165", "0.52610266", "0.5258601", "0.5257462", "0.5247194", "0.5216561", "0.52134645", "0.5212665", "0.5200357", "0.5186478", "0.51604086", "0.51563865", "0.51120424", "0.51012987", "0.50975215", "0.5076277", "0.5067898", "0.50586164", "0.50479555", "0.50407946", "0.5019433", "0.5001819", "0.49806586", "0.49793783", "0.49793708", "0.49637592", "0.49573445", "0.49473727", "0.49455667", "0.49401444", "0.49366337", "0.49252546", "0.49236742", "0.4920903", "0.49130335", "0.49116194", "0.4904163", "0.48985744", "0.48985744", "0.48840335", "0.48839834", "0.48744723", "0.48695073", "0.48695073", "0.48638052", "0.4863354", "0.4863177", "0.48515874", "0.48418126", "0.4836709", "0.48109892", "0.4810779", "0.48102587", "0.48080078", "0.48060817", "0.48017433", "0.48004627", "0.47893745", "0.47765335", "0.47755224", "0.47730246", "0.4752225", "0.47500554", "0.47468665", "0.4734289", "0.4733711", "0.47322863", "0.47268617", "0.47218847", "0.47165877", "0.4715056", "0.47143915", "0.4711723", "0.47038275", "0.4698977", "0.4696039", "0.46960375", "0.46939662", "0.46927762", "0.46914324", "0.4690125", "0.46839887", "0.46813977", "0.46695426", "0.46678248", "0.46659043", "0.46629673", "0.4660503", "0.46583763", "0.46542034", "0.4654142" ]
0.5622148
3
Add a face to the DCEL if it doesn't already exist, otherwise return the existing face.
def add_face(self, face):
    try:
        face_idx = self.faces.index(face)
        return self.faces[face_idx]
    except Exception:
        self.faces.append(face)
        return face
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_face(self, face):\n\n if face.uuid is None:\n face.uuid = self._generate_uuid()\n\n if face.uuid in self._faces:\n error_str = \"Trying to add an already existing face with uuid: \"\\\n + str(face.uuid)\n raise KeyError(error_str)\n\n self._faces[face.uuid] = Face.from_face(face)\n\n return face.uuid", "def update_face(self, face):\n\n if face.uuid not in self._faces:\n error_str = \"Trying to update a non-existing face with uuid: \"\\\n + str(face.uuid)\n raise KeyError(error_str)\n\n if not isinstance(face, Face):\n error_str = \"Trying to update an object with the wrong type. \"\\\n + \"Face expected.\"\n raise TypeError(error_str)\n\n face_to_update = self._faces[face.uuid]\n\n face_to_update.data = face.data\n face_to_update.points = face.points", "def addFace(self, vertices, bypassCheck=False):\n try:\n if bypassCheck:\n raise ValueError\n return self.getFace(vertices)\n except ValueError:\n if any(vertices.count(v) > 1 for v in vertices):\n raise ValueError('The face given is invalid: '\n 'two or more vertices are identical')\n newF = face(vertices)\n self.faces.append(newF)\n for i in range(len(vertices)):\n try:\n e = self.getEdge(vertices[i],\n vertices[(i + 1) % len(vertices)])\n except ValueError:\n e = self.addEdge(vertices[i],\n vertices[(i + 1) % len(vertices)])\n e.linkFace(newF)\n return newF", "def _add_facet(self, ea, eb, ec):\n f = Facet2(ea, eb, ec)\n i = bisect(self.edges, f)\n if len(self.facets) > i and self.facets[i] == f:\n return self.facets[i]\n \n self.facets.insert(i, f)\n return f", "def change_face(self, face):\n if self.face is not None:\n self.face.remove_point(self)\n\n self.face = face\n self.face.add_point(self)", "def get_face(self, uuid):\n\n try:\n return Face.from_face(self._faces[uuid])\n except KeyError:\n error_str = \"Trying to get an non-existing face with uuid: {}\"\n raise ValueError(error_str.format(uuid))", "def insertFace(bm, v):\n a = []\n for k in range(len(v)):\n a.append(bm.verts[v[k]])\n f = bm.faces.new(a)\n bm.faces.ensure_lookup_table()\n return f", "def add_face(self, vertices: Iterable[\"Vertex\"]) -> None:\n self.faces.append(self.add_vertices(vertices))", "def getFace(self, vertices):\n for f in self.faces:\n if f.vertices == vertices:\n return f\n raise ValueError('No face found')", "def add(self, cell, overwrite_duplicate=False):\n if isinstance(cell, Cell):\n if (not overwrite_duplicate and cell.name in self.cell_dict and\n self.cell_dict[cell.name] is not cell):\n raise ValueError(\"[GDSPY] cell named {0} already present in \"\n \"library.\".format(cell.name))\n self.cell_dict[cell.name] = cell\n else:\n for c in cell:\n if (not overwrite_duplicate and c.name in self.cell_dict and\n self.cell_dict[c.name] is not c):\n raise ValueError(\"[GDSPY] cell named {0} already present \"\n \"in library.\".format(c.name))\n self.cell_dict[c.name] = c\n return self", "def selectmeshface(self):#Not used yet\n go = Rhino.Input.Custom.GetObject()\n go.GeometryFilter=Rhino.DocObjects.ObjectType.MeshFace\n go.SetCommandPrompt(\"Get mesh Face\")\n go.Get()\n objref=go.Object(0)\n face_guid = objref.ObjectId\n go.Dispose()\n \n return face_guid", "def addPhoto(fileName, personName):\n\n #Check if image is a jpg\n if (fileName[-4:] != \".jpg\"):\n print(\"\\n[!] File extenstion must be .jpg!\\n\")\n return\n\n #Check image exists\n if (not os.path.isfile(fileName)):\n print(\"\\n[!] 
File does not exist!\\n\")\n return\n\n #Check no illegal characters in file name\n for c in ILLEGAL_FILE_NAMES:\n if (c in personName):\n print(\"\\n[!] Provided name contains an illegal argument\\n\")\n return\n\n #Load image\n image = face_recognition.load_image_file(fileName)\n\n #Use the name in the filename as the identity key\n identity = os.path.splitext(os.path.basename(fileName))[0]\n\n #Get the face location\n locationsHog = hogDetectFaceLocations(image)\n\n locationsHaar = haarDetectFaceLocations(image)\n\n #Get the face encoding\n encodingsHaar = face_recognition.face_encodings(image, locationsHaar)\n encodingsHog = face_recognition.face_encodings(image, locationsHog)\n\n #check if exactly one face is in the photo\n if ((len(encodingsHaar) == 0) or (len(encodingsHog) == 0)):\n print(\"\\n[!] No face detected in the provided photo\\n\")\n return\n\n elif ((len(encodingsHaar) > 1) or (len(encodingsHog) > 1)):\n print(\"\\n[!] More than one face detected in the provided photo\\n\")\n return\n\n #Set path to respective dataset\n directoryToAddTo = DATABASE_PATH + personName\n\n #Look for directory\n exists = False\n for subdir, dirs, files in os.walk(DATABASE_PATH):\n if (subdir == directoryToAddTo):\n exists = True\n\n #If directory doesnt exist, make it\n if (not exists):\n os.mkdir(directoryToAddTo)\n\n #Save data to file\n np.savetxt((directoryToAddTo + \"/\" + identity + \"Haar.txt\"),\n encodingsHaar[0])\n np.savetxt((directoryToAddTo + \"/\" + identity + \"Hog.txt\"),\n encodingsHog[0])\n\n print(\"\\n[*] Face successfully added!\\n\")", "def add_new_known_face(new_file_name, known_face_encodings, known_face_names):\n face_encoding = read_face_encoding(new_file_name)\n known_face_encodings.append(face_encoding)\n\n known_face_names.append(new_file_name)\n\n return known_face_encodings, known_face_names", "def setFace(self, value):\n self.face = value", "def setFace(self, value):\n self.face = value", "def face(self):\n\n return self.faceup", "def log_in_database(name, face):\n #FIX: Curtain name array and face array to the first element of each respective array.\n if len(name) != 0:\n faces_present = 1\n name = name[0]\n face = face[0]\n\n #Check if the names and faces pickle exists.\n if (file_path/\"names_and_faces.pkl\").exists():\n\n #Load the pickled dictionary.\n with open(file_path/\"names_and_faces.pkl\", mode = \"rb\") as opened_file:\n names_and_faces = pickle.load(opened_file)\n\n #If the person's name is already in the dictionary, then append the descriptor to the end of the value as part of a list.\n if name in names_and_faces.keys():\n names_and_faces[name].append(face)\n\n #If the person's name is not in the dictionary, make a new dictionary entry.\n else:\n names_and_faces[name] = [face]\n #If there is no dictionary, make a new dictionary.\n else:\n names_and_faces = {}\n names_and_faces[name] = [face]\n\n #Save the dictionary.\n with open(file_path/\"names_and_faces.pkl\", mode = \"wb\") as opened_file:\n pickle.dump(names_and_faces, opened_file)\n else:\n faces_present = 0\n return faces_present", "def markIntersectedWith(self, face):\n try:\n self.hasIntersected.add(face)\n except AttributeError:\n self.hasIntersected = set()\n self.hasIntersected.add(face)", "def is_existing_face(image, trackers, face):\n\n x1, y1, w1, h1 = face\n face_mask = np.zeros_like(image)\n face_mask[y1:y1+h1, x1:x1+w1] = 1\n for t in trackers:\n try:\n x,y,w,h = t.bounding_box\n t_mask = np.zeros_like(image)\n t_mask[y:y+h, x:x+w] = 1\n\n union = np.sum(np.bitwise_or(face_mask, 
t_mask))\n intersection = np.bitwise_and(face_mask, t_mask)\n if float(np.sum(intersection))/union > 0.3 or float(np.sum(intersection))/np.sum(t_mask+1) > 0.7:\n return (t, True)\n except Exception:\n pass\n \n return (None, False)", "def linkFace(self, f):\n if self.pFace is None:\n self.pFace = f\n elif self.nFace is None:\n self.nFace = f\n else:\n raise ValueError('Edge is already linked to two faces')", "def add_vertex(self, vertex):\n try:\n vertex_idx = self.vertices.index(vertex)\n # print \"{} already in {}\".format(vertex, self.vertices)\n return self.vertices[vertex_idx]\n except Exception:\n self.vertices.append(vertex)\n # print \"adding {} to {}\".format(vertex, self.vertices)\n return vertex", "def add_image_face():\n\n try:\n img = decode_image(request.files[\"image\"].read())\n except Exception as e:\n log.error(e)\n data = {\"error\": \"Error while loading image\"}\n return jsonify(data), 500\n save_picture = False\n if request.args.get(\"save\") == \"true\":\n save_picture = True\n \n face_img, _ = processor.extract_faces()\n #TODO\n #1. get several images if possible\n #2. save face_img array as picture if save_picture == True\n #3. pipe face_img array to embedder --> embedder needs to be modified to not from a folder, but from array of face_img\n #4. get the embedder result, insert to a pickle object --> can be section ID, or whatever", "def _merge_face(merge, image, face):\n scaled = merge.image.resize(face.as_dimension()).convert(\"RGBA\")\n image.image = image.image.convert(\"RGBA\")\n image.image.paste(scaled, face.as_box(), mask=scaled)\n return image", "def draw_face_box(data):\n head = extract_head(data);\n face_box = data['position_data']['face_box'][data['i']];\n sefs = data['sefs'][data['i']];\n if face_box is not None:\n cv2.rectangle(head, *face_box, (0, 255, 0));\n else:\n cv2.rectangle(head, *sefs, (0, 0, 255));\n return head;", "def test_face_in_face(self):\n w = mt.createCube(marker=1, boundaryMarker=1)\n b = w.boundary(2)\n\n pad = mt.createFacet(mt.createCircle(radius=0.2, segments=12,\n isHole=True))\n b2 = pad.boundary(0)\n\n # rotate to match target norm and pos\n rot = pg.core.getRotation(b2.norm(), b.norm())\n pad.transform(rot)\n pad.translate(b.center())\n\n # create a boundary with new marker match the hole\n w.copyBoundary(b2)\n\n w.createBoundary(w.nodes([w.createNode(n.pos()).id() for n in b2.nodes()]),\n marker=2)\n\n #print(w.boundaryMarkers())\n\n mesh = mt.createMesh(w)\n\n #pg.show(mesh)\n # w.exportPLC('pad.poly')\n # mesh.exportBoundaryVTU('b.vtu')\n np.testing.assert_array_equal(pg.unique(pg.sort(mesh.boundaryMarkers())),\n [0, 1, 2])\n\n # print(mesh)\n # mesh.exportBoundaryVTU('b.vtu')\n pg.show(mesh)", "def paint_faces_data(frame, faces_data):\n for face in faces_data:\n (top, right, bottom, left) = face['location']\n\n if face['identity'] is None:\n name = 'Unknown'\n color = (0, 0, 255) # red\n else:\n name = face['identity']\n color = (0, 128, 0) # dark green\n\n # Draw a box around the face\n cv2.rectangle(frame, (left, top), (right, bottom), color, 2)\n\n # Draw a label with a name below the face\n cv2.rectangle(frame, (left, bottom - 35), (right, bottom), color, cv2.FILLED)\n cv2.putText(frame, name, (left + 6, bottom - 6), cv2.FONT_HERSHEY_DUPLEX, 1.0, (255, 255, 255), 1)", "def _load_known_face(self):\n faces_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'faces')\n faces = [os.path.join(faces_dir, f) for f in os.listdir(faces_dir) if f.endswith('.jpeg')]\n known_images = 
[face_recognition.load_image_file(i) for i in faces]\n self.known_faces = []\n for image in known_images:\n encoding = face_recognition.face_encodings(image)\n if len(encoding) > 0:\n logging.debug('Adding known face')\n self.known_faces.append(encoding[0])", "def mesh_add_vertex_to_face_edge(mesh, key, fkey, v):\n vertices = mesh.face_vertices(fkey)\n i = vertices.index(v)\n u = vertices[i - 1]\n vertices.insert(key, i - 1)\n mesh.halfedge[u][key] = fkey\n mesh.halfedge[key][v] = fkey\n if u not in mesh.halfedge[key]:\n mesh.halfedge[key][u] = None\n if key not in mesh.halfedge[v]:\n mesh.halfedge[v][key] = None\n del mesh.halfedge[u][v]\n if u in mesh.halfedge[v]:\n del mesh.halfedge[v][u]\n if (u, v) in mesh.edgedata:\n del mesh.edgedata[u, v]\n if (v, u) in mesh.edgedata:\n del mesh.edgedata[v, u]", "def face_with_verts(bm, verts, default=None):\n for face in bm.faces:\n if len(set(list(face.verts) + verts)) == len(verts):\n return face\n return default", "def loginWithFace(self):\n #store, detector, recognizer, pca\n if self.store[\"face_added\"] == True:\n success = loginWithFace(self.store, self.detector, self.recognizer, self.pca)\n if success:\n self.clearScreen()\n from screen3 import Screen3\n Screen3(self.parent, self.store)\n else:\n self.temporaryLabel(self.frame2, \"Login failed. Try again\",\n x=220, y=420, fg=\"#F00\", second=1)\n else:\n self.temporaryLabel(self.frame2, \"You haven't added face recognition\",\n x=220, y=420, fg=\"#F00\", second=1)", "def trackFace():\n\n\t\t# start face tracker\n\t\tself.track.setWholeBodyOn(False)\n\t\tself.track.startTracker()", "def get_face(self, image):\n face = self.__detect_face(image)[0]\n x1, y1, x2, y2, _, _ = face.left(), face.top(), \\\n face.right() + 1, face.bottom() + 1, face.width(), face.height()\n return image[y1:y1 + (y2 - y1), x1:x1 + (x2 - x1), :]", "def recognize_face(a_face):\r\n if not type(a_face) is TopoDS_Face:\r\n print(\"Please hit the 'G' key to switch to face selection mode\")\r\n return False\r\n surf = BRepAdaptor_Surface(a_face, True)\r\n surf_type = surf.GetType()\r\n if surf_type == GeomAbs_Plane:\r\n print(\"Identified Plane Geometry\")\r\n # look for the properties of the plane\r\n # first get the related gp_Pln\r\n gp_pln = surf.Plane()\r\n location = gp_pln.Location() # a point of the plane\r\n normal = gp_pln.Axis().Direction() # the plane normal\r\n # then export location and normal to the console output\r\n print(\r\n \"--> Location (global coordinates)\",\r\n location.X(),\r\n location.Y(),\r\n location.Z(),\r\n )\r\n print(\"--> Normal (global coordinates)\", normal.X(), normal.Y(), normal.Z())\r\n elif surf_type == GeomAbs_Cylinder:\r\n print(\"Identified Cylinder Geometry\")\r\n # look for the properties of the cylinder\r\n # first get the related gp_Cyl\r\n gp_cyl = surf.Cylinder()\r\n location = gp_cyl.Location() # a point of the axis\r\n axis = gp_cyl.Axis().Direction() # the cylinder axis\r\n # then export location and normal to the console output\r\n print(\r\n \"--> Location (global coordinates)\",\r\n location.X(),\r\n location.Y(),\r\n location.Z(),\r\n )\r\n print(\"--> Axis (global coordinates)\", axis.X(), axis.Y(), axis.Z())\r\n elif surf_type == GeomAbs_BSplineSurface:\r\n print(\"Identified BSplineSurface Geometry\")\r\n # gp_bsrf = surf.Surface()\r\n # degree = gp_bsrf.NbUKnots()\r\n # TODO use a model that provided BSplineSurfaces, as1_pe_203.stp only contains\r\n # planes and cylinders\r\n else:\r\n # TODO there are plenty other type that can be checked\r\n # see 
documentation for the BRepAdaptor class\r\n # https://www.opencascade.com/doc/occt-6.9.1/refman/html/class_b_rep_adaptor___surface.html\r\n print(surf_type, \"recognition not implemented\")", "def trackFace(self):\n\n\t\t# start face tracker\n\t\tself.track.setWholeBodyOn(False)\n\t\tself.track.startTracker()", "def update_known_face(new_file_name, known_face_encodings, known_face_names):\n name = new_file_name.split('.jpg')[0].split('/')[-1]\n finding = False\n\n for i in range(0, len(known_face_names)):\n if name == known_face_names[i]:\n finding = True\n face_encoding = read_face_encoding(new_file_name)\n known_face_encodings[i] = face_encoding\n break\n if finding:\n return known_face_encodings, known_face_names\n else:\n raise Exception(\"\\n\\nERROR: there are no name \\'\" + name + \"\\' in the base of known faces\\n\\n\")", "def load_dlib_frontal_face_detector():\n return DlibDetector(dlib.get_frontal_face_detector())", "def map_face(self):\n #Array Order: U0,D1,R2,L3,F4,B5,\n \n cube_list = []\n cube_list = self.cube.definition()\n \n for index, cubit in enumerate(self.faces['Up']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index]])\n for index, cubit in enumerate(self.faces['Ri']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+9]])\n for index, cubit in enumerate(self.faces['Ft']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+18]])\n for index, cubit in enumerate(self.faces['Dn']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+27]])\n for index, cubit in enumerate(self.faces['Le']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+36]])\n for index, cubit in enumerate(self.faces['Bk']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+45]])", "def face(gray=False):\n import bz2\n import os\n with open(os.path.join(os.path.dirname(__file__), 'face.dat'), 'rb') as f:\n rawdata = f.read()\n data = bz2.decompress(rawdata)\n face = fromstring(data, dtype='uint8')\n face.shape = (768, 1024, 3)\n if gray is True:\n face = (0.21 * face[:,:,0] + 0.71 * face[:,:,1] + 0.07 * face[:,:,2]).astype('uint8')\n return face", "def setDieFaceValue(self, faceValue):\n if faceValue in AngryDie.ANGRY_VALUES:\n self.currentValue = faceValue\n self.value = AngryDie.ANGRY_VALUES[faceValue]", "def find_face(im, bias=None, padding=0, min_padding=0):\n\n # Import optional libraries required for face detection\n import dlib\n from skimage import io as skimage_io\n skimage_io.use_plugin('pil')\n\n # Check we have already aquired a face detector and if not do so now\n if not hasattr(Variation, '_face_detector'):\n Variation._face_detector = dlib.get_frontal_face_detector()\n\n # Convert the image to an array that can be read by skimage\n w, h = im.size\n skimage_im = numpy.array(im.getdata(), numpy.uint8).reshape(h, w, 3)\n\n d = dlib.get_frontal_face_detector()\n faces = d(skimage_im, 1)\n\n # Detect faces\n faces = Variation._face_detector(skimage_im, 1)\n\n # If no faces were detected there's nothing more to do, we return `None`\n if len(faces) == 0:\n return\n\n # If a face was found apply any bias and padding to it\n face = faces[0]\n rect = [face.left(), face.top(), face.right(), face.bottom()]\n\n # Apply bias\n if bias:\n # Shift the center of the face\n bias_x = int(face.width() * bias[0])\n bias_y = int(face.width() * bias[1])\n rect[0] += bias_x\n rect[1] += bias_y\n rect[2] += bias_x\n rect[3] += bias_y\n\n # Apply padding\n if padding > 0:\n\n # Determine the 
maximum amount of padding that can be applied in any\n # direction.\n max_padding = rect[0]\n max_padding = min(rect[1], max_padding)\n max_padding = min(rect[2], max_padding)\n max_padding = min(rect[3], max_padding)\n\n # Calculate the padding to apply\n pad = [\n int(face.width() * padding),\n int(face.height() * padding)\n ]\n\n # Ensure that the minimum padding is observed\n if min_padding > 0:\n pad = [\n min(pad[0], max(max_padding, int(pad[0] * min_padding))),\n min(pad[1], max(max_padding, int(pad[1] * min_padding)))\n ]\n\n # Apply the padding to the face rectangle\n rect[0] = max(rect[0] - pad[0], 0)\n rect[1] = max(rect[1] - pad[1], 0)\n rect[2] = min(rect[2] + pad[0], im.size[0])\n rect[3] = min(rect[3] + pad[1], im.size[1])\n\n return rect", "def detect_face(self, img, img_file_path=None):\n #use dlib face detector\n #create dlib detector, this is hog with svm\n detector = dlib.get_frontal_face_detector()\n #win = dlib.image_window()\n if img_file_path:\n img = dlib.load_rgb_image(img_file_path)\n #detect number of faces in an image\n dets = detector(img)\n list_face_coord = [] # this will store left, top, right, bottom\n for i, d in enumerate(dets):\n list_face_coord.append((d.left(), d.top(), d.right(), d.bottom()))\n return list_face_coord", "def add_cell(self, cell):\n\n if cell.uuid is None:\n cell.uuid = self._generate_uuid()\n\n if cell.uuid in self._cells:\n error_str = \"Trying to add an already existing cell with uuid: \"\\\n + str(cell.uuid)\n raise KeyError(error_str)\n\n self._cells[cell.uuid] = Cell.from_cell(cell)\n\n return cell.uuid", "def get_card(self, suit, face):\n for card in self.deck:\n if card.suit == suit and card.value == face:\n return card", "def add(\n self,\n cell,\n include_dependencies=True,\n overwrite_duplicate=False,\n update_references=True,\n ):\n if isinstance(cell, Cell):\n cell_set = set([cell])\n if include_dependencies:\n cell_set.update(cell.get_dependencies(True))\n else:\n cell_set = set(cell)\n if include_dependencies:\n for c in cell:\n cell_set.update(c.get_dependencies(True))\n for c in cell_set:\n if (\n not overwrite_duplicate\n and c.name in self.cells\n and self.cells[c.name] is not c\n ):\n raise ValueError(\n \"[GDSPY] Cell named {0} already present in library.\".format(c.name)\n )\n if (\n overwrite_duplicate\n and update_references\n and c.name in self.cells\n and self.cells[c.name] is not c\n ):\n self.replace_references(c.name, c)\n self.cells[c.name] = c\n return self", "def add_faces_to_map(bm, faces, group, skip=None):\n face_map = bm.faces.layers.face_map.active\n group_index = face_map_index_from_name(group.name.lower())\n\n def remove_skipped(f):\n if skip:\n skip_index = face_map_index_from_name(skip.name.lower())\n return not (f[face_map] == skip_index)\n return True\n\n for face in list(filter(remove_skipped, faces)):\n face[face_map] = group_index\n\n obj = bpy.context.object\n\n # -- if auto uv map is set, perform UV Mapping for given faces\n if obj.facemap_materials[group_index].auto_map:\n map_method = obj.facemap_materials[group_index].uv_mapping_method\n uv_map_active_editmesh_selection(faces, map_method)\n\n # -- if the facemap already has a material assigned, assign the new faces to the material\n mat = obj.facemap_materials[group_index].material\n mat_id = [idx for idx, m in enumerate(obj.data.materials) if m == mat]\n if mat_id:\n for f in faces:\n f.material_index = mat_id[-1]", "def detect_face(self, img):\n # Fetch face location from the frame with 128 encoding of face landmarks\n curr_face_loc, 
name_list, info_list = load_encode_loc(img, self.kwn_names,\n self.kwn_encoding,\n self.status_list, self.since_list)\n print('Current value is ', curr_face_loc, name_list)\n face_list = []\n face_area = []\n print('face loc', curr_face_loc)\n if len(curr_face_loc):\n\n for (top, right, bottom, left), name in zip(curr_face_loc, name_list):\n print(top, right, bottom, left)\n cv2.rectangle(img, (top, right), (bottom, left), (0, 255, 2), 2)\n\n w = right - left\n h = bottom - top\n cx = left + w // 2\n cy = top + h // 2\n area = w * h\n\n for idx, info in enumerate(info_list):\n cv2.putText(img, info, (bottom, int(left * idx * 0.2)),\n cv2.FONT_HERSHEY_COMPLEX, 1,\n (0, 0, 255), 1)\n\n face_list.append([cx, cy])\n face_area.append(area)\n\n i = face_area.index(max(face_area))\n\n return img, [face_list[i], face_area[i]]\n\n else:\n return img, [[0, 0], 0]", "def faces(self, f):\n self._faces = f", "def add_one_facedetect_entry(self, timestamp, count):\n logger.info(\"Add new data point: {} at {}\".format(count,\n datetime.datetime.utcfromtimestamp(timestamp).isoformat()))\n c = self._conn.cursor()\n c.execute(\"INSERT INTO FacesDetect VALUES({}, {})\".format(timestamp, count))\n self._conn.commit()\n new_data = (timestamp, count)\n self.face_detect_data.append(new_data)\n WebClientsCommands.sendNewFacesEntryAll(self.face_detect_data, new_data)", "def FaceToFace(\n movablePlane: str, fixedPlane: str, flip: Boolean, clearance: float\n ) -> \"Feature\":\n return Feature()", "def track_face(video):\n\n # Get the cascade classifier with pre-trained classifiers\n face_cascade = cv2.CascadeClassifier('frontal_face.xml')\n\n # initialize a previous face with zeros\n prev_face = (0, 0, 0, 0)\n result = []\n\n # Loop until frame from video capture returns None\n while True:\n _, img = video.read()\n if img is None:\n break\n\n # convert to gray scale\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Do the face detection. 
Set minNeighbors relatively high\n # to eliminate false positive faces.\n faces = face_cascade.detectMultiScale(gray, 1.3, 5,\n cv2.cv.CV_HAAR_SCALE_IMAGE)\n\n # If the number of faces found is 0, use face from previous frame\n # as a good indicator of where face in the current frame should be.\n # Has inherent error but works well.\n if len(faces) == 0:\n faces = [prev_face]\n\n # Sort faces by width of bounding box in reverse order\n sort_face = sorted(faces, key=lambda x: x[2], reverse=True)\n\n # Get bounding box of largest face, assumes smaller ones are\n # false positives.\n x, y, w, h = sort_face[0]\n\n # append to results\n result.append((x, y, x + w, y + h))\n # set previous face\n prev_face = faces[0]\n\n if VISUALIZE:\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n roi_gray = gray[y: y + h, x: x + w]\n roi_color = img[y: y + h, x: x + w]\n cv2.imshow('img', img)\n cv2.waitKey(30)\n\n return result", "def faces(self) -> Polygon:\n return Polygon(self.array, copy=False)", "def copy(self):\n new = Face(np.array(self.norm[:]), self.colour[:])\n return new", "def paint_detected_face_on_image(frame, location, name=None):\n\n # Unpack the coordinates from the location tuple\n top, right, bottom, left = location\n\n if name is None:\n name = 'unknown'\n color = (0, 0, 255) # Red for unrecognized face\n else:\n color = (0, 128, 0) # Dark green for recognized face\n\n # Draw a box around the face\n cv2.rectangle(frame, (left, top), (right, bottom), color, 2)\n\n # Draw a label with the name around the face\n cv2.rectangle(frame, (left, bottom - 35), (right, bottom), color, cv2.FILLED)\n cv2.putText(frame, name, (left + 6, bottom - 6), cv2.FONT_HERSHEY_DUPLEX, 1.0, (255, 255, 255), 1)", "def get_image_with_faces(self, image):\n img = image.copy()\n faces = self.__detect_face(img)\n\n if len(faces) > 0:\n for i, d in enumerate(faces):\n x1, y1, x2, y2, _, _ = d.left(), d.top(), \\\n d.right() + 1, d.bottom() + 1, d.width(), d.height()\n cv2.rectangle(img, (x1, y1), (x2, y2), RECT_COLOR, RECT_THICKNESS)\n return img", "def add_actor(\n self,\n actor,\n reset_camera=False,\n name=None,\n culling=False,\n pickable=True,\n render=True,\n remove_existing_actor=True,\n ):\n # Remove actor by that name if present\n rv = None\n if name and remove_existing_actor:\n rv = self.remove_actor(name, reset_camera=False, render=False)\n\n if isinstance(actor, _vtk.vtkMapper):\n actor = Actor(mapper=actor, name=name)\n\n if isinstance(actor, Actor) and name:\n # WARNING: this will override the name if already set on Actor\n actor.name = name\n\n if name is None:\n if isinstance(actor, Actor):\n name = actor.name\n else:\n # Fallback for non-wrapped actors\n # e.g., vtkScalarBarActor\n name = actor.GetAddressAsString(\"\")\n\n actor.SetPickable(pickable)\n # Apply this renderer's scale to the actor (which can be further scaled)\n if hasattr(actor, 'SetScale'):\n actor.SetScale(np.array(actor.GetScale()) * np.array(self.scale))\n self.AddActor(actor) # must add actor before resetting camera\n self._actors[name] = actor\n\n if reset_camera:\n self.reset_camera(render)\n elif not self.camera_set and reset_camera is None and not rv:\n self.reset_camera(render)\n elif render:\n self.parent.render()\n\n self.update_bounds_axes()\n\n if isinstance(culling, str):\n culling = culling.lower()\n\n if culling:\n if culling in [True, 'back', 'backface', 'b']:\n try:\n actor.GetProperty().BackfaceCullingOn()\n except AttributeError: # pragma: no cover\n pass\n elif culling in ['front', 'frontface', 'f']:\n 
try:\n actor.GetProperty().FrontfaceCullingOn()\n except AttributeError: # pragma: no cover\n pass\n else:\n raise ValueError(f'Culling option ({culling}) not understood.')\n\n self.Modified()\n\n prop = None\n if hasattr(actor, 'GetProperty'):\n prop = actor.GetProperty()\n\n return actor, prop", "def add(self, feature):\n \n if self.bo is not None:\n feature.attach(self.bo)\n \n bo_feature_name = feature.name\n \n if bo_feature_name not in self._d_features:\n log.info(\"Add feature '%s'\" % feature)\n self._d_features[bo_feature_name] = feature\n return(True)\n else:\n log.error(\"Feature name '%s' ever exists - you must delete it before\" % bo_feature_name)\n return(False)", "def add_edge(self, edge):\n try:\n edge_idx = self.edges.index(edge)\n return self.edges[edge_idx]\n except Exception:\n self.edges.append(edge)\n return edge", "def detect_face(image):\n cascadePath = \"haarcascade_frontalface_default.xml\"\n faceCascade = cv2.CascadeClassifier(cascadePath)\n faces = faceCascade.detectMultiScale(image)\n if len(faces)>=1:#Should be == , not >=\n return True\n return False", "def face_detection(img, faceCascade=faceCascade):\n\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\tfaces = faceCascade.detectMultiScale(\n\t\tgray,\n\t\tscaleFactor=1.2,\n\t\tminNeighbors=5,\n\t\tminSize=(32, 32))\n\n\t# If no face detected\n\tif len(faces) == 0:\n\t\tw = min(img.shape[0], img.shape[1])\n\t\treturn img[(img.shape[0]-w)//2:(img.shape[0]+w)//2, (img.shape[1]-w)//2:(img.shape[1]+w)//2, :]\n\n\t# If faces detected, choose the face with the max size\n\tmax_h, index = 0, 0\n\tfor i, (x, y, w, h) in enumerate(faces):\n\t\tif max_h < h:\n\t\t\tmax_h, index = h, i\n\n\t(x, y, w, h) = faces[index]\n\n\tif img.shape[0]>img.shape[1]:\n\t\tif x + w/2 < img.shape[0]/2:\n\t\t\treturn img[:img.shape[1],:,:]\n\n\t\telse:\n\t\t\treturn img[-img.shape[1]:,:,:]\n\n\telse:\n\t\tif y + h/2 < img.shape[1]/2:\n\t\t\treturn img[:,:img.shape[0],:]\n\n\t\telse:\n\t\t\treturn img[:,-img.shape[0]:,:]", "def make_legend_face(face):\n if face is None:\n legendface = pyvista.PolyData([0.0, 0.0, 0.0])\n elif face in [\"-\", \"line\"]:\n legendface = _line_for_legend()\n elif face in [\"^\", \"triangle\"]:\n legendface = pyvista.Triangle()\n elif face in [\"o\", \"circle\"]:\n legendface = pyvista.Circle()\n elif face in [\"r\", \"rectangle\"]:\n legendface = pyvista.Rectangle()\n elif isinstance(face, pyvista.PolyData):\n legendface = face\n else:\n raise ValueError(\n f'Invalid face \"{face}\". 
Must be one of the following:\\n'\n '\\t\"triangle\"\\n'\n '\\t\"circle\"\\n'\n '\\t\"rectangle\"\\n'\n '\\tNone'\n '\\tpyvista.PolyData'\n )\n return legendface", "def detect_face(face_file, max_results=10):\n client = vision.ImageAnnotatorClient()\n\n content = face_file.read()\n image = types.Image(content=content)\n\n return client.face_detection(image=image).face_annotations", "def _interiorFaces(self):\n XYids = self._XYFaceIDs\n XZids = self._XZFaceIDs\n YZids = self._YZFaceIDs\n\n interiorIDs = numerix.concatenate((numerix.ravel(XYids[ ..., 1:-1]),\n numerix.ravel(XZids[:, 1:-1,:]),\n numerix.ravel(YZids[1:-1, ...].swapaxes(0, 1))))\n\n from fipy.variables.faceVariable import FaceVariable\n interiorFaces = FaceVariable(mesh=self, value=False)\n interiorFaces[interiorIDs] = True\n return interiorFaces", "def add_named_faces(self, name: str, *faces: Face):\n face_index_list = self._named_faces.get(name) or []\n for face in faces:\n face_index_list.append(self._find_face_index(face))\n self._named_faces[name] = face_index_list", "def draw_face(self, face, window, xy, width):\n width = width / 2 - (width/2/20)\n\n if face == \"U\":\n face = self.state[0:4]\n elif face == \"L\":\n face = self.state[4:8]\n elif face == \"F\":\n face = self.state[8:12]\n elif face == \"R\":\n face = self.state[12:16]\n elif face == \"B\":\n face = self.state[16:20]\n elif face == \"D\":\n face = self.state[20:24]\n\n rect1 = pygame.Rect((xy[0],xy[1], width, width))\n rect2 = pygame.Rect((xy[0]+(width+(width/10)),xy[1], width, width))\n rect3 = pygame.Rect((xy[0],xy[1]+(width+(width/10)), width, width))\n rect4 = pygame.Rect((xy[0]+(width+(width/10)),xy[1]+(width+(width/10)), width, width))\n pygame.draw.rect(window, self.colors[face[0]], rect2)\n pygame.draw.rect(window, self.colors[face[1]], rect1)\n pygame.draw.rect(window, self.colors[face[2]], rect3)\n pygame.draw.rect(window, self.colors[face[3]], rect4)", "def detect_face(face_file, max_results=4):\n client = vision.ImageAnnotatorClient()\n\n content = face_file.read()\n image = types.Image(content=content)\n\n return client.face_detection(image=image).face_annotations", "def add_entry(self, ent, can_replace=True):\n if self.has_entry(ent.ID):\n if not can_replace:\n self.visual.error(f\"Entry {ent.ID} already exists in the collection!\")\n return None\n # delete existing, to replace\n self.remove(ent)\n ent = self.add_entry_to_collection_containers(ent)\n if ent is None:\n return ent\n self.add_entry_to_bibtex_db(ent)\n self.visual.log(f\"Added ID: {ent.ID}\")\n return ent", "def detectFaces():\n faceEngine = VLFaceEngine()\n detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)\n\n imageWithOneFace = VLImage.load(filename=EXAMPLE_O)\n pprint.pprint(detector.detectOne(imageWithOneFace, detect5Landmarks=False, detect68Landmarks=False).asDict())\n detection = detector.detectOne(imageWithOneFace, detect5Landmarks=False, detect68Landmarks=False)\n pprint.pprint(detector.redetectOne(image=imageWithOneFace, bBox=detection))\n pprint.pprint(detector.redetectOne(image=imageWithOneFace, bBox=detection.boundingBox.rect))\n\n imageWithSeveralFaces = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)\n severalFaces = detector.detect([imageWithSeveralFaces], detect5Landmarks=False, detect68Landmarks=False)\n\n pprint.pprint(\n detector.redetect(\n images=[\n ImageForRedetection(imageWithSeveralFaces, [face.boundingBox.rect for face in severalFaces[0]]),\n ImageForRedetection(imageWithOneFace, [detection.boundingBox.rect]),\n 
ImageForRedetection(imageWithOneFace, [Rect(0, 0, 1, 1)]),\n ]\n )\n )", "def _get_vertex_face_adjacency(self, data=None):\n # Input checks:\n nv = self.vertices.shape[0]\n f = self.faces # Convert to an ndarray or pass if already is one\n # Computation\n row = f.reshape(-1) # Flatten indices\n col = np.tile(np.arange(len(f)).reshape((-1, 1)), (1, f.shape[1])).reshape(-1) # Data for vertices\n shape = (nv, len(f))\n\n if not data:\n data = np.ones(len(col), dtype=np.bool)\n\n # assemble into sparse matrix\n return coo_matrix((data, (row, col)), shape=shape, dtype=data.dtype)", "def faceIntersection(self, face):\n # Test wether both ends of the edge are in the same half space\n # (relative to <face>'s plane).\n normal = face.normalVect()\n v0 = vector(face.vertices[0])\n vp = vector(self.pvt)\n vn = vector(self.nvt)\n p = normal.dotProduct(vp - v0) * normal.dotProduct(vn - v0)\n if p > 0:\n return False\n elif abs(p) <= COMPARISON_EPSILON or abs(normal.dotProduct(vp - vn) / (normal.norm() * (vp - vn).norm())) <= COMPARISON_EPSILON:\n # print('ah')\n return False\n else:\n interVect = vn + (normal.dotProduct(v0 - vn) /\n normal.dotProduct(vp - vn)) * (vp - vn)\n lastCross = ((vector(face.vertices[-1]) - interVect) *\n (vector(face.vertices[0]) - interVect))\n for i in range(len(face.vertices)):\n cross = ((vector(face.vertices[i]) - interVect) *\n (vector(face.vertices[(i + 1) % len(face.vertices)]) -\n interVect))\n p = cross.dotProduct(lastCross)\n if p < 0:\n return False\n elif p == 0 and cross.norm() != 0:\n if cross.norm() > COMPARISON_EPSILON:\n warnings.warn(\"Cross product's norm is very low\")\n lastCross = cross\n return interVect.coords()", "def add_favorite(self, deck_id):\n added_deck = self.data_source.add_favorite(self.user_id, deck_id)\n\n return added_deck", "def export_face(ind, face):\n isplane, center, radius, face_size = get_sphere_info(face)\n if isplane:\n macro = \"FlatFace({}, {}, array[{}]{{{}}}, {}, {})\\n\"\n return macro.format(ind, len(face), len(face), pov_vector_list(face),\n pov_vector(center), face_size)\n else:\n macro = \"BubbleFace({}, {}, array[{}]{{{}}}, {}, {}, {})\\n\"\n return macro.format(ind, len(face), len(face), pov_vector_list(face),\n pov_vector(center), radius, face_size)", "def detect_face(self, img):\n #convert the test image to gray image as opencv face detector expects gray images\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n #let's detect multiscale (some images may be closer to camera than others) images\n #result is a list of faces\n faces = self.face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5);\n\n #if no faces are detected then return None\n if (len(faces) == 0):\n return None, None\n\n #under the assumption that there will be only one face,\n #extract the face area\n (x, y, w, h) = faces[0]\n\n #return only the face part of the image\n return gray[y:y+w, x:x+h], faces[0]", "def SetFace(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_WireDivide_SetFace(self, *args)", "def add_feature(self, feat: str):\n if feat not in self._features:\n self._features.append(feat)\n else:\n raise IDAlreadyExists", "def __detect_face(self, img):\n gray = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY)\n return self.detector(gray, 1)", "def face_detector(img):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # faceCascade imports in the previously made classifier\n faceCascade = cv2.CascadeClassifier('src/face_detection/haarcascade_frontalface_default.xml')\n faces = faceCascade.detectMultiScale(\n gray, \n scaleFactor=1.2,\n 
minNeighbors=1, \n minSize=(100, 100)\n )\n\n return faces", "def copy(self):\n newVertices = [v.copy() for v in self.vertices]\n return face(newVertices)", "def update(self,image):\r\n \r\n self._faces=[]\r\n \r\n if util.isgray(image):\r\n image=cv2.equalizeHist(image)\r\n \r\n else:\r\n \r\n image=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\r\n cv2.equalizeHist(image,image)\r\n \r\n minsize=util.widthheightdividedby(image,8)\r\n\r\n \r\n\r\n \r\n facerect=self._faceclassifier.detectMultiScale(image,\r\n self.scalefactor,\r\n self.minneighbors,\r\n self.flags,\r\n minsize)\r\n \r\n \"\"\"if facerects is not None:\r\n \r\n for facerect in facerects:\r\n face=face()\r\n \r\n face.facerect=facerect\r\n \r\n \r\n x,y,w,h=facerect\r\n \r\n # Seek an eye in the upper-left part of the face. \r\n searchRect = (x+w/7, y, w*2/7, h/2) \r\n face.leftEyeRect = self._detectOneObject( \r\n self._eyeClassifier, image, searchRect, 64) \r\n \r\n \r\n \r\n # Seek an eye in the upper-right part of the face. \r\n searchRect = (x+w*4/7, y, w*2/7, h/2) \r\n face.rightEyeRect = self._detectOneObject( \r\n self._eyeClassifier, image, searchRect, 64) \r\n \r\n \r\n \r\n # Seek a nose in the middle part of the face. \r\n searchRect = (x+w/4, y+h/4, w/2, h/2) \r\n face.noseRect = self._detectOneObject( \r\n self._noseClassifier, image, searchRect, 32) \r\n \r\n # Seek a mouth in the lower-middle part of the face. \r\n searchRect = (x+w/6, y+h*2/3, w*2/3, h/3) \r\n face.mouthRect = self._detectOneObject( \r\n self._mouthClassifier, image, searchRect, 16) \r\n \r\n \r\n \r\n self._faces.append(face)\r\n\r\n \r\n \r\n def _detectoneobject(self,\r\n classifier,\r\n image,\r\n rect,\r\n imagesizetominsizeratio):\r\n \r\n x ,y ,w ,h=rect\r\n \r\n minsize=util.widthheightdividedby(image,\r\n imagesizetominsizeratio)\r\n \r\n subimage=image[y:y+h,x:x+w]\r\n \r\n subrect=classifier.dectectMultiScale(subimage,\r\n self.scalefactor,\r\n self.minneighbors,\r\n self.flags,\r\n minsize)\r\n \r\n if len(subrect)==0:\r\n return None\r\n \r\n subx,suby,subw,subh=subrects[0]\r\n \r\n return (x+subx,y+suby,w+subw,h+subh)\r\n \r\n \"\"\"", "def _exteriorFaces(self):\n XYids = self._XYFaceIDs\n XZids = self._XZFaceIDs\n YZids = self._YZFaceIDs\n\n exteriorIDs = numerix.concatenate((numerix.ravel(XYids[..., 0].swapaxes(0, 1)),\n numerix.ravel(XYids[..., -1].swapaxes(0, 1)),\n numerix.ravel(XZids[:, 0,:]),\n numerix.ravel(XZids[:, -1,:]),\n numerix.ravel(YZids[ 0, ...]),\n numerix.ravel(YZids[-1, ...])))\n\n from fipy.variables.faceVariable import FaceVariable\n exteriorFaces = FaceVariable(mesh=self, value=False)\n exteriorFaces[exteriorIDs] = True\n return exteriorFaces", "def _load_known_faces(self):\n faces_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'faces')\n faces = [\n os.path.join(faces_dir, f) for f in os.listdir(faces_dir) \\\n if f.endswith('.jpeg') or f.endswith('.jpg') or f.endswith('.png')\n ]\n known_images = [face_recognition.load_image_file(i) for i in faces]\n self.known_faces = []\n self.faces_names = [x.split('/')[-1].split('.')[0].replace('_', ' ').title() for x in faces]\n for image in known_images:\n encoding = face_recognition.face_encodings(image)\n if len(encoding) > 0:\n logging.debug('Adding known face')\n self.known_faces.append(encoding[0])", "def registered_face_id(self, registered_face_id):\n\n self._registered_face_id = registered_face_id", "def match_face(data, encoding):\r\n\t# Attempt to match each face in the input image to the known faces\r\n\tmatches = 
face_recognition.compare_faces(data[\"encodings\"], encoding, global_vars.tolerance)\r\n\tname = \"?\"\r\n\r\n\t# Check to see if there is a match\r\n\tif True in matches:\r\n\t\t# Find the indexes of all matched faces\r\n\t\t# Initialize dictionary to count the total number of times each face was matched\r\n\t\tmatchedIdxs = [i for (i, b) in enumerate(matches) if b]\r\n\t\tcounts = {}\r\n\r\n\t\t# Loop over the matched indexes and maintain a count for each recognized face\r\n\t\tfor i in matchedIdxs:\r\n\t\t\tname = data[\"names\"][i]\r\n\t\t\tcounts[name] = counts.get(name, 0) + 1\r\n\t\t# Use the recognized face with most votes\r\n\t\tname = max(counts, key=counts.get)\r\n\treturn name", "def face_maker(size=180, color='0x9C661F'):\n face = GOval(size, size-20)\n face.filled = True\n face.fill_color = color\n return face", "def add_image(self, file_name, content):\n self.face_fs.put(content, filename=file_name)", "def add_vertex(self, vertex):\n if vertex.id not in self.vertices.keys():\n self.vertices[vertex.id] = vertex", "def recognize_faces(image_file_path):\n image_pil = Image.open(image_file_path)\n draw = ImageDraw.Draw(image_pil)\n\n known_face_encodings_dict = get_known_face_encodings_dict()\n known_names = list(known_face_encodings_dict.keys())\n known_face_encodings = list(known_face_encodings_dict.values())\n\n del known_face_encodings_dict\n\n for face_location in face_detection.get_face_locations(image_file_path):\n face_encoding = get_face_encodings(\n image_file_path, known_face_locations=[face_location]\n )[0]\n\n recognition_flags = face_recognition.compare_faces(\n known_face_encodings, face_encoding\n )\n\n for flag, name in zip(recognition_flags, known_names):\n if not flag:\n continue\n\n top, right, bottom, left = face_location\n draw.rectangle((left, top, right, bottom), outline=\"#FF1493\")\n text_width, text_height = draw.textsize(name)\n draw.rectangle(\n (left, bottom, right, bottom + text_height + 10),\n fill=\"#FF1493\",\n outline=\"#FF1493\",\n )\n draw.text((left + 6, bottom + 5), name, fill=\"white\")\n\n del draw # conserve resources\n image_pil.show()", "def _getface_hog_cnn(self,img,mode):\n faces = face_locations(img,number_of_times_to_upsample=1,model=self.model_name)\n if len(faces)==0:\n return None\n if mode == 1:\n out = faces[0]\n elif mode ==2 :\n top,right,bottom,left = faces[0]\n x,y,w,h = int(left), int(top), int(right-left+1), int(bottom-top+1)\n out = [x,y,w,h]\n return out", "def _merge_faces(merge, image):\n for face in image.faces:\n _merge_face(merge, image, face)\n return image", "def faces(self, image):\n\n response = self._send_request(\"faces\", files=dict(image=image))\n return response['objectdetection']", "def lookup_known_face(self, face_encoding, known_face_encodings, known_face_metadata):\n metadata = None\n\n # If our known face list is empty, just return nothing since we can't possibly have seen this face.\n if len(known_face_encodings) == 0:\n return metadata\n\n # Calculate the face distance between the unknown face and every face on in our known face list\n # This will return a floating point number between 0.0 and 1.0 for each known face. The smaller the number,\n # the more similar that face was to the unknown face.\n face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)\n\n # Get the known face that had the lowest distance (i.e. 
most similar) from the unknown face.\n best_match_index = np.argmin(face_distances)\n # best_match_index = np.argmax(face_distances)\n\n # print('best_match_index=%i' % best_match_index)\n\n # If the face with the lowest distance had a distance under 0.6, we consider it a face match.\n # 0.6 comes from how the face recognition model was trained. It was trained to make sure pictures\n # of the same person always were less than 0.6 away from each other.\n # Here, we are loosening the threshold a little bit to 0.65 because it is unlikely that two very similar\n # people will come up to the door at the same time.\n ident_limit = 0.6\n if face_distances[best_match_index] < ident_limit:\n # If we have a match, look up the metadata we've saved for it (like the first time we saw it, etc)\n metadata = known_face_metadata[best_match_index]\n\n metadata[\"face_distance\"] = face_distances[best_match_index]\n\n # print('metadata:')\n # print(metadata)\n\n return metadata", "def detect_face_api(self, img):\n\n curr_face_loc, name_list, info_list = load_encode_loc(img, self.kwn_names,\n self.kwn_encoding,\n self.status_list, self.since_list)\n print('Current value is ', curr_face_loc, name_list)\n face_list = []\n face_area = []\n print('face loc', curr_face_loc)\n if len(curr_face_loc):\n\n for (top, right, bottom, left), name in zip(curr_face_loc, name_list):\n print(top, right, bottom, left)\n cv2.rectangle(img, (top, right), (bottom, left), (0, 255, 2), 2)\n\n w = right - left\n h = bottom - top\n cx = left + w // 2\n cy = top + h // 2\n area = w * h\n\n for idx, info in enumerate(info_list):\n cv2.putText(img, info, (bottom, int(left * idx * 0.2)),\n cv2.FONT_HERSHEY_COMPLEX, 1,\n (0, 0, 255), 1)\n\n face_list.append([cx, cy])\n face_area.append(area)\n\n i = face_area.index(max(face_area))\n\n return img, [face_list[i], face_area[i]]\n\n else:\n return img, [[0, 0], 0]", "def faceDiv(self):\n if getattr(self, '_faceDiv', None) is None:\n n = self.vnC\n # Compute faceDivergence operator on faces\n if(self.dim == 1):\n D = ddx(n[0])\n elif(self.dim == 2):\n D1 = sp.kron(speye(n[1]), ddx(n[0]))\n D2 = sp.kron(ddx(n[1]), speye(n[0]))\n D = sp.hstack((D1, D2), format=\"csr\")\n elif(self.dim == 3):\n D1 = kron3(speye(n[2]), speye(n[1]), ddx(n[0]))\n D2 = kron3(speye(n[2]), ddx(n[1]), speye(n[0]))\n D3 = kron3(ddx(n[2]), speye(n[1]), speye(n[0]))\n D = sp.hstack((D1, D2, D3), format=\"csr\")\n # Compute areas of cell faces & volumes\n S = self.area\n V = self.vol\n self._faceDiv = sdiag(1/V)*D*sdiag(S)\n return self._faceDiv", "def SetFace(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_EdgeDivide_SetFace(self, *args)", "def add_family(self, f):\n if f.fid in self.families.keys():\n print(f'US22 - {f.fid} id has a duplicate in line number {f._fid_line}')\n self.families[f.fid] = f\n return Family()", "def react_to_faces(faces):\n if (len(faces) == 1):\n # Get the location of the face (one of six positions)\n face_loc = get_location(faces[0].bbox, vision.VIDEO_SIZE)\n # Set the Raspimon pose\n if face_loc is not None:\n sense.set_pixels(VOLT_POSES[face_loc])", "def _add_vertex(self, x, y):\n v = Vertex2(x, y)\n i = bisect(self.vertices, v)\n \n # if vertex at these coordinates exists just return it\n if len(self.vertices) > i and self.vertices[i] == v:\n return self.vertices[i]\n \n # otherwise add new vertex in sorted position and return it\n self.vertices.insert(i, v)\n return v", "def add(self, data, check_exists=True): # pragma: no cover\n raise NotImplementedError", "def add_sdcube(self, mapping, 
name=None):\n with h5py.File(self.filename, 'r') as h5_file:\n sdcubes = load_attribute(h5_file, 'sdcubes')\n if name in sdcubes:\n logging.error('A group with the name %s alread exists' % name)\n raise KeyError('A group with the name %s alread exists' % name)\n for key in sdcubes:\n if name.lower() == key:\n raise Warning('%s looks like %s!' % (name, key))\n raise KeyError('%s looks like %s!' % (name, key))\n if not name:\n name = str(len(sdcubes)) #name will be a number\n \n #TODO: until we decide if filenames are needed\n #if not filename:\n # filename = self.filename\n filename = self.filename\n\n\n SdCube(name, filename, mapping)\n with h5py.File(self.filename, 'a') as h5_file:\n sdcubes = load_attribute(h5_file, 'sdcubes')\n sdcubes[name] = filename\n store_attribute(h5_file, 'sdcubes', sdcubes)\n\n # FIXME: it's better for this method to return the added\n # subcube, rather than the name (which, it not already known\n # to the caller, can be accessed through the returned cube's\n # \"name\" attribute)\n\n return name", "def newidfobject(self, key, **kwargs):\n # get list of objects\n existing_objs = self.idfobjects[key] # a list\n\n # create new object\n try:\n new_object = self.anidfobject(key, **kwargs)\n except BadEPFieldError as e:\n if str(e) == \"unknown field Key_Name\":\n # Try backwards compatibility with EnergyPlus < 9.0.0\n name = kwargs.pop(\"Key_Name\")\n kwargs[\"Name\"] = name\n else:\n log(f\"Could not add object {key} because of: {e}\", lg.WARNING)\n return None\n else:\n new_object = self.anidfobject(key, **kwargs)\n # If object is supposed to be 'unique-object', deletes all objects to be\n # sure there is only one of them when creating new object\n # (see following line)\n if \"unique-object\" in set().union(\n *(d.objidd[0].keys() for d in existing_objs)\n ):\n for obj in existing_objs:\n self.removeidfobject(obj)\n self.addidfobject(new_object)\n log(\n f\"{obj} is a 'unique-object'; Removed and replaced with\"\n f\" {new_object}\",\n lg.DEBUG,\n )\n return new_object\n if new_object in existing_objs:\n # If obj already exists, simply return\n log(\n f\"object '{new_object}' already exists in {self.name}. \"\n f\"Skipping.\",\n lg.DEBUG,\n )\n return new_object\n elif new_object not in existing_objs and new_object.nameexists():\n obj = self.getobject(\n key=new_object.key.upper(), name=new_object.Name.upper()\n )\n self.removeidfobject(obj)\n self.addidfobject(new_object)\n log(\n f\"{obj} exists but has different attributes; Removed and replaced \"\n f\"with {new_object}\",\n lg.DEBUG,\n )\n return new_object\n else:\n # add to model and return\n self.addidfobject(new_object)\n log(f\"object '{new_object}' added to '{self.name}'\", lg.DEBUG)\n return new_object", "def _box_face(image, face):\n draw = PIL.ImageDraw.Draw(image.image)\n draw.rectangle(face.as_box(), outline=\"yellow\")" ]
[ "0.7516604", "0.62119764", "0.61790365", "0.5979341", "0.5872748", "0.5751965", "0.5587595", "0.539488", "0.5394591", "0.5375931", "0.53083056", "0.5234154", "0.52226603", "0.5124483", "0.5124483", "0.5102945", "0.50553864", "0.5042418", "0.5035193", "0.502191", "0.5008115", "0.4987875", "0.49856907", "0.49489287", "0.4946597", "0.49424168", "0.49255627", "0.49093345", "0.48817888", "0.48489112", "0.48389876", "0.4833186", "0.48109066", "0.4796467", "0.47926846", "0.47832888", "0.4779551", "0.47640878", "0.47509053", "0.47357598", "0.47356576", "0.47334403", "0.4725399", "0.4717313", "0.4696169", "0.46828768", "0.46789485", "0.4672445", "0.46427667", "0.46254718", "0.4619078", "0.4616658", "0.46047845", "0.46041226", "0.4601172", "0.45995477", "0.45987862", "0.45858046", "0.4568218", "0.4563548", "0.45476875", "0.45440727", "0.4535321", "0.45338455", "0.45112342", "0.45069352", "0.45028", "0.45012492", "0.44987857", "0.44921136", "0.4489239", "0.4475229", "0.447293", "0.44722882", "0.4470492", "0.44500047", "0.4439778", "0.44347668", "0.44267878", "0.43851587", "0.43846676", "0.43836033", "0.43808615", "0.4363135", "0.4362788", "0.43604922", "0.4360015", "0.43551627", "0.43517354", "0.434762", "0.43465665", "0.434023", "0.43366104", "0.4333739", "0.43302312", "0.43255475", "0.43224847", "0.43158835", "0.43047854", "0.43046376" ]
0.8184843
0
Return a list of vertices that form the outer boundary of finite faces of the DCEL.
def get_outer_boundary_of_voronoi(self):
        edge = [edge for edge in self.edges if not edge.nxt][0]
        # next(obj for obj in objs if obj.val==5)
        first_vertex = edge.origin
        outer_boundary = []
        while (not edge.get_destination() == first_vertex):
            if(edge.get_destination().is_infinity()):
                edge = edge.twin.nxt
            else:
                outer_boundary.append(edge)
                edge = edge.nxt
        outer_boundary.append(edge)
        return outer_boundary
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetInteriorEdgesQuad(self):\n\n if not isinstance(self.all_edges,np.ndarray):\n self.GetEdgesQuad()\n if not isinstance(self.edges,np.ndarray):\n self.GetBoundaryEdgesQuad()\n\n sorted_all_edges = np.sort(self.all_edges,axis=1)\n sorted_boundary_edges = np.sort(self.edges,axis=1)\n\n x = []\n for i in range(self.edges.shape[0]):\n current_sorted_boundary_edge = np.tile(sorted_boundary_edges[i,:],\n self.all_edges.shape[0]).reshape(self.all_edges.shape[0],self.all_edges.shape[1])\n interior_edges = np.linalg.norm(current_sorted_boundary_edge - sorted_all_edges,axis=1)\n pos_interior_edges = np.where(interior_edges==0)[0]\n if pos_interior_edges.shape[0] != 0:\n x.append(pos_interior_edges)\n\n edge_aranger = np.arange(self.all_edges.shape[0])\n edge_aranger = np.setdiff1d(edge_aranger,np.array(x)[:,0])\n interior_edges = self.all_edges[edge_aranger,:]\n\n # GET FLAGS FOR BOUNDRAY AND INTERIOR\n edge_flags = np.ones(self.all_edges.shape[0],dtype=np.int64)\n edge_flags[edge_aranger] = 0\n\n self.interior_edges = interior_edges\n return interior_edges, edge_flags", "def vertices(self):\n\n if self._faces is None:\n if self._vertices is None:\n return None\n self.triangulate()\n return self._vertices", "def get_outer_vertices(feature):\n return [\n point\n for part in Geometry.get_multipolygon(feature)\n for point in part[0][0:-1]\n ]", "def bounded_edges(self):\n obj = self.Vrepresentation()\n edges = []\n for i in range(len(obj)):\n if not obj[i].is_vertex(): continue\n for j in range(i+1,len(obj)):\n if not obj[j].is_vertex(): continue\n if self.vertex_adjacency_matrix()[i,j] == 0: continue\n yield (obj[i], obj[j])", "def _interiorFaces(self):\n XYids = self._XYFaceIDs\n XZids = self._XZFaceIDs\n YZids = self._YZFaceIDs\n\n interiorIDs = numerix.concatenate((numerix.ravel(XYids[ ..., 1:-1]),\n numerix.ravel(XZids[:, 1:-1,:]),\n numerix.ravel(YZids[1:-1, ...].swapaxes(0, 1))))\n\n from fipy.variables.faceVariable import FaceVariable\n interiorFaces = FaceVariable(mesh=self, value=False)\n interiorFaces[interiorIDs] = True\n return interiorFaces", "def GetInteriorEdgesPent(self):\n\n if not isinstance(self.all_edges,np.ndarray):\n self.GetEdgesPent()\n if not isinstance(self.edges,np.ndarray):\n self.GetBoundaryEdgesPent()\n\n sorted_all_edges = np.sort(self.all_edges,axis=1)\n sorted_boundary_edges = np.sort(self.edges,axis=1)\n\n x = []\n for i in range(self.edges.shape[0]):\n current_sorted_boundary_edge = np.tile(sorted_boundary_edges[i,:],\n self.all_edges.shape[0]).reshape(self.all_edges.shape[0],self.all_edges.shape[1])\n interior_edges = np.linalg.norm(current_sorted_boundary_edge - sorted_all_edges,axis=1)\n pos_interior_edges = np.where(interior_edges==0)[0]\n if pos_interior_edges.shape[0] != 0:\n x.append(pos_interior_edges)\n\n edge_aranger = np.arange(self.all_edges.shape[0])\n edge_aranger = np.setdiff1d(edge_aranger,np.array(x)[:,0])\n interior_edges = self.all_edges[edge_aranger,:]\n\n # GET FLAGS FOR BOUNDRAY AND INTERIOR\n edge_flags = np.ones(self.all_edges.shape[0],dtype=np.int64)\n edge_flags[edge_aranger] = 0\n\n self.interior_edges = interior_edges\n return interior_edges, edge_flags", "def faces_as_vertices(self) -> Iterable[List[Vec3]]:\n v = self.vertices\n for face in self.faces:\n yield [v[index] for index in face]", "def make_convex_hull(self):\n hull_points_d = []\n try:\n print \"self.V_bar_list_d******************\", self.V_bar_list_d\n hull = ConvexHull(self.V_bar_list_d)\n hull_vertices = hull.vertices\n\n for i in hull_vertices:\n 
hull_points_d.append(self.V_bar_list_d[i])\n\n except scipy.spatial.qhull.QhullError:\n hull_points_d = self.V_bar_list_d\n\n return hull_points_d", "def GetInteriorEdgesTri(self):\n\n if not isinstance(self.all_edges,np.ndarray):\n self.GetEdgesTri()\n if not isinstance(self.edges,np.ndarray):\n self.GetBoundaryEdgesTri()\n\n sorted_all_edges = np.sort(self.all_edges,axis=1)\n sorted_boundary_edges = np.sort(self.edges,axis=1)\n\n x = []\n for i in range(self.edges.shape[0]):\n current_sorted_boundary_edge = np.tile(sorted_boundary_edges[i,:],\n self.all_edges.shape[0]).reshape(self.all_edges.shape[0],self.all_edges.shape[1])\n interior_edges = np.linalg.norm(current_sorted_boundary_edge - sorted_all_edges,axis=1)\n pos_interior_edges = np.where(interior_edges==0)[0]\n if pos_interior_edges.shape[0] != 0:\n x.append(pos_interior_edges)\n\n edge_aranger = np.arange(self.all_edges.shape[0])\n edge_aranger = np.setdiff1d(edge_aranger,np.array(x)[:,0])\n interior_edges = self.all_edges[edge_aranger,:]\n\n # GET FLAGS FOR BOUNDRAY AND INTERIOR\n edge_flags = np.ones(self.all_edges.shape[0],dtype=np.int64)\n edge_flags[edge_aranger] = 0\n\n self.interior_edges = interior_edges\n return interior_edges, edge_flags", "def vertices(self):\n return self.pointlist", "def get_bounded_faces(self):\n return [face for face in self.faces if face.is_bounded()]", "def GetBoundaryEdgesHex(self):\n\n p = self.InferPolynomialDegree()\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.edges,np.ndarray):\n if self.edges.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.edges.shape[1] == 2 and p > 1:\n pass\n else:\n return\n\n\n # FIRST GET BOUNDARY FACES\n if not isinstance(self.faces,np.ndarray):\n self.GetBoundaryFacesHex()\n\n # BUILD A 2D MESH\n tmesh = Mesh()\n tmesh.element_type = \"quad\"\n tmesh.elements = self.faces\n tmesh.nelem = tmesh.elements.shape[0]\n del tmesh.faces\n del tmesh.points\n\n # ALL THE EDGES CORRESPONDING TO THESE BOUNDARY FACES ARE BOUNDARY EDGES\n self.edges = tmesh.GetEdgesQuad()", "def GetBoundaryFacesHex(self):\n\n p = self.InferPolynomialDegree()\n\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.faces,np.ndarray):\n if self.faces.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.faces.shape[1] == 4 and p > 1:\n pass\n else:\n return\n\n node_arranger = NodeArrangementHex(p-1)[0]\n\n # CONCATENATE ALL THE FACES MADE FROM ELEMENTS\n all_faces = np.concatenate((np.concatenate((\n np.concatenate((np.concatenate((np.concatenate((self.elements[:,node_arranger[0,:]],\n self.elements[:,node_arranger[1,:]]),axis=0),self.elements[:,node_arranger[2,:]]),axis=0),\n self.elements[:,node_arranger[3,:]]),axis=0),self.elements[:,node_arranger[4,:]]),axis=0),\n self.elements[:,node_arranger[5,:]]),axis=0).astype(np.int64)\n # GET UNIQUE ROWS\n uniques, idx, inv = unique2d(all_faces,consider_sort=True,order=False,return_index=True,return_inverse=True)\n\n # ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY FACES\n freqs_inv = itemfreq(inv)\n faces_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]\n # NOT ARRANGED\n self.faces = uniques[faces_ext_flags,:]\n\n # DETERMINE WHICH FACE OF THE ELEMENT THEY ARE\n boundary_face_to_element = np.zeros((faces_ext_flags.shape[0],2),dtype=np.int64)\n\n # FURTHER RE-ARRANGEMENT / ARANGE THE NODES BASED ON THE ORDER THEY APPEAR\n # IN ELEMENT CONNECTIVITY\n # THIS STEP IS NOT NECESSARY INDEED - ITS JUST FOR RE-ARANGMENT OF FACES\n all_faces_in_faces = 
in2d(all_faces,self.faces,consider_sort=True)\n all_faces_in_faces = np.where(all_faces_in_faces==True)[0]\n\n # boundary_face_to_element = np.zeros((all_faces_in_faces.shape[0],2),dtype=np.int64)\n boundary_face_to_element[:,0] = all_faces_in_faces % self.elements.shape[0]\n boundary_face_to_element[:,1] = all_faces_in_faces // self.elements.shape[0]\n\n # ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS\n self.faces = self.elements[boundary_face_to_element[:,0][:,None],node_arranger[boundary_face_to_element[:,1],:]]\n self.faces = self.faces.astype(np.uint64)\n self.boundary_face_to_element = boundary_face_to_element", "def vertices(self):\n return map(Vertex, self._top_exp.vertices())", "def boundary_polygon(self):\n try:\n return self.boundary_polygon_by_edges()\n except Exception as exc:\n self.log.warning('Warning, boundary_polygon() failed using edges! Trying polygon union method')\n self.log.warning(exc,exc_info=True)\n return self.boundary_polygon_by_union()", "def cw_face_edges(self,face):\n\n l0 = self.region_link[face]\n if face == self.left_region[l0]:\n l0 = (l0[1], l0[0])\n l = l0\n\n traversing = True\n edges = []\n while traversing:\n edges.append(l)\n r = self.right_region[l]\n if r == face:\n l = self.succ_right[l]\n else:\n l = self.succ_left[l]\n if l == l0:\n traversing = False\n return edges", "def vertices(self) -> list[Point]:\n first_polygon_index = self.rank - max(self.pdim - 1, 1) - 1\n new_shape = self.shape[:first_polygon_index] + (-1, self.shape[-1])\n array = self.array.reshape(new_shape)\n return list(distinct(Point(x, copy=False) for x in np.moveaxis(array, -2, 0)))", "def vertices(self):\n return list(self._graph)", "def boundary_polygon_by_edges(self):\n lines=self.boundary_linestrings()\n polys=join_features.lines_to_polygons(lines,close_arc=False)\n if len(polys)>1:\n raise GridException(\"somehow there are multiple boundary polygons\")\n return polys[0]", "def get_vertices(self) -> []:\n return [i for i in self.adj_list]", "def calculateMeshInv(mesh_face_vertices):\n mesh_inv = []\n for mesh in mesh_face_vertices:\n U = np.array([\n [mesh[0, 0], mesh[1, 0], mesh[2, 0]],\n [mesh[0, 1], mesh[1, 1], mesh[2, 1]],\n [1, 1, 1],\n ])\n mesh_inv.append(np.linalg.inv(U))\n return np.array(mesh_inv)", "def find_isolated_vertices(self):\n graph = self.__graph_dict\n isolated = []\n for vertex in graph:\n # print(isolated,vertex)\n if not graph[vertex]:\n isolated += [vertex]\n return isolated", "def mesh_boundary(mesh):\n adja = edges_to_adjacency_matrix(mesh)\n r = sparse.extract.find(adja)\n li = r[0][np.where(r[2] == 1)]\n lj = r[1][np.where(r[2] == 1)]\n edges_boundary = np.vstack([li, lj]).T\n \"\"\"\n # alternative implementation based on edges and grouping from trimesh\n # instead of adjacency matrix\n from trimesh import grouping\n groups = grouping.group_rows(mesh.edges_sorted, require_count=1)\n # vertex_boundary = np.unique(open_mesh.edges_sorted[groups])\n edges_boundary = mesh.edges_sorted[groups]\n \"\"\"\n if li.size == 0:\n print('No holes in the surface !!!!')\n return np.array()\n else:\n return edges_to_boundary(edges_boundary)", "def GetElementsWithBoundaryFacesHex(self):\n\n # DO NOT COMPUTE FACES AND RAISE BECAUSE OF CYCLIC DEPENDENCIES\n assert self.elements is not None\n assert self.faces is not None\n\n if self.boundary_face_to_element is not None:\n return self.boundary_face_to_element\n\n # THIS METHOD ALWAYS RETURNS THE FACE TO ELEMENT ARRAY, AND DOES NOT CHECK\n # IF THIS HAS BEEN COMPUTED BEFORE, THE REASON BEING THAT 
THE FACES CAN COME\n # EXTERNALLY WHOSE ARRANGEMENT WOULD NOT CORRESPOND TO THE ONE USED INTERNALLY\n # HENCE THIS MAPPING BECOMES NECESSARY\n\n C = self.InferPolynomialDegree() - 1\n node_arranger = NodeArrangementHex(C)[0]\n\n all_faces = np.concatenate((np.concatenate((\n np.concatenate((np.concatenate((np.concatenate((self.elements[:,node_arranger[0,:]],\n self.elements[:,node_arranger[1,:]]),axis=0),self.elements[:,node_arranger[2,:]]),axis=0),\n self.elements[:,node_arranger[3,:]]),axis=0),self.elements[:,node_arranger[4,:]]),axis=0),\n self.elements[:,node_arranger[5,:]]),axis=0).astype(self.faces.dtype)\n\n all_faces_in_faces = in2d(all_faces,self.faces[:,:4],consider_sort=True)\n all_faces_in_faces = np.where(all_faces_in_faces==True)[0]\n\n boundary_face_to_element = np.zeros((all_faces_in_faces.shape[0],2),dtype=np.int64)\n boundary_face_to_element[:,0] = all_faces_in_faces % self.elements.shape[0]\n boundary_face_to_element[:,1] = all_faces_in_faces // self.elements.shape[0]\n\n\n # SO FAR WE HAVE COMPUTED THE ELEMENTS THAT CONTAIN FACES, HOWEVER\n # NOTE THAT WE STILL HAVE NOT COMPUTED A MAPPING BETWEEN ELEMENTS AND\n # FACES. WE ONLY KNOW WHICH ELEMENTS CONTAIN FACES FROM in2d.\n # WE NEED TO FIND THIS MAPPING NOW\n\n # WE NEED TO DO THIS DUMMY RECONSTRUCTION OF FACES BASED ON ELEMENTS\n faces = self.elements[boundary_face_to_element[:,0][:,None],\n node_arranger[boundary_face_to_element[:,1],:]].astype(self.faces.dtype)\n\n # CHECK FOR THIS CONDITION AS ARRANGEMENT IS NO LONGER MAINTAINED\n assert np.sum(faces[:,:4].astype(np.int64) - self.faces[:,:4].astype(np.int64)) == 0\n\n # NOW GET THE ROW MAPPING BETWEEN OLD FACES AND NEW FACES\n from Florence.Tensor import shuffle_along_axis\n row_mapper = shuffle_along_axis(faces[:,:4],self.faces[:,:4],consider_sort=True)\n\n # UPDATE THE MAP\n boundary_face_to_element[:,:] = boundary_face_to_element[row_mapper,:]\n self.boundary_face_to_element = boundary_face_to_element\n\n return self.boundary_face_to_element", "def vertices(self):\n return self._vertices", "def get_faces(ulist, vlist):\n width = len(ulist)\n faces = []\n for i in range(len(ulist) - 1):\n for j in range(len(vlist) - 1):\n topleft = j * width + i\n topright = topleft + 1\n bottomleft = ((j + 1) * width) + i\n bottomright = bottomleft + 1\n one = [topleft, topright, bottomleft]\n two = [bottomleft, topright, bottomright]\n faces.append(one)\n faces.append(two)\n\n return faces", "def vertices(self):\n d = self.space_dimension()\n v = vector(ZZ, d)\n points = []\n for g in self.minimized_generators():\n for i in range(0,d):\n v[i] = g.coefficient(Variable(i))\n v_copy = copy.copy(v)\n v_copy.set_immutable()\n points.append(v_copy)\n return tuple(points)", "def GetInteriorFacesHex(self):\n\n if not isinstance(self.all_faces,np.ndarray):\n self.GetFacesHex()\n if not isinstance(self.faces,np.ndarray):\n self.GetBoundaryFacesHex()\n\n face_flags = in2d(self.all_faces.astype(self.faces.dtype),self.faces,consider_sort=True)\n face_flags[face_flags==True] = 1\n face_flags[face_flags==False] = 0\n interior_faces = self.all_faces[face_flags==False,:]\n\n return interior_faces, face_flags", "def get_faces(self):\n faces = []\n for j in range(0, self.height - 1):\n for i in range(0, self.width - 1):\n # add the two triangle faces\n tl = (j * self.width) + i\n tr = (j * self.width) + i + 1\n bl = ((j+1) * self.width) + i\n br = ((j+1) * self.width) + i + 1\n\n face = [bl, tr, tl]\n faces.append(face)\n face = [bl, br, tr]\n faces.append(face)\n return faces", "def 
vertices(self):\n try:\n return self._vertices\n except:\n self._vertices = [list(x) for x in self.vertex_generator()]\n return self._vertices", "def vertices(self):\r\n return self.adjacent.keys()", "def get_vertices(self):\n vertices = []\n V = [[-self.base_vectors[:,n], self.base_vectors[:,n]] for n in range(self.base_vectors.shape[1])]\n combs = list(itertools.product(*V))\n for cb in combs:\n cb = np.sum(np.array(cb).T, axis=1, keepdims=True)\n vertices.append(self.base_vertices + cb)\n\n vertices = np.concatenate(vertices,axis=1)\n return vertices", "def boundary_edge_ids(self,):\n return self.boundary_edge_ids_", "def GetElementsWithBoundaryEdgesTri(self):\n\n if isinstance(self.boundary_edge_to_element,np.ndarray):\n if self.boundary_edge_to_element.shape[1] > 1 and self.boundary_edge_to_element.shape[0] > 1:\n return self.boundary_edge_to_element\n\n # DO NOT COMPUTE EDGES AND RAISE BECAUSE OF CYCLIC DEPENDENCIES\n assert self.elements is not None\n assert self.edges is not None\n\n edge_elements = np.zeros((self.edges.shape[0],2),dtype=np.int64)\n\n # FIND WHICH FACE NODES ARE IN WHICH ELEMENT\n for i in range(self.edges.shape[0]):\n x = []\n for j in range(2):\n x.append(np.where(self.elements[:,:3]==self.edges[i,j])[0])\n\n # FIND WHICH ELEMENTS CONTAIN ALL FACE NODES - FOR INTERIOR ELEMENTS\n # THEIR CAN BE MORE THAN ONE ELEMENT CONTAINING ALL FACE NODES\n z = x[0]\n for k in range(1,len(x)):\n z = np.intersect1d(x[k],z)\n\n # CHOOSE ONLY ONE OF THESE ELEMENTS\n edge_elements[i,0] = z[0]\n # WHICH COLUMNS IN THAT ELEMENT ARE THE FACE NODES LOCATED\n cols = np.array([np.where(self.elements[z[0],:]==self.edges[i,0])[0],\n np.where(self.elements[z[0],:]==self.edges[i,1])[0]\n ])\n\n cols = np.sort(cols.flatten())\n\n if cols[0] == 0 and cols[1] == 1:\n edge_elements[i,1] = 0\n elif cols[0] == 1 and cols[1] == 2:\n edge_elements[i,1] = 1\n elif cols[0] == 0 and cols[1] == 2:\n edge_elements[i,1] = 2\n\n self.boundary_edge_to_element = edge_elements\n return edge_elements", "def vertices(self):\n return list(self.__graph.values())", "def vertices(self):\n s = set([x for x in self.edges.keys()])\n t = set([y for v in self.edges.values() for (y,d) in v.items()])\n v = s.union(t)\n return list(v)", "def _getFaces(self, it_mesh_poly):\n\n faces = []\n it_mesh_poly.reset()\n while not it_mesh_poly.isDone():\n faces += it_mesh_poly.getVertices()\n it_mesh_poly.next(1)\n\n return faces", "def _get_all_vertices(self, ref_frame='WORLD') -> np.ndarray:\n\n\t\tdepsgraph = bpy.context.evaluated_depsgraph_get() # to account for deformations\n\n\t\tif ref_frame not in {'LOCAL', 'WORLD'}:\n\t\t\traise ValueError(f\"Invalid ref_frame: {ref_frame}. 
Must be one of ['LOCAL', 'WORLD']\")\n\n\t\tverts = []\n\n\t\tfor mesh in self._meshes:\n\n\t\t\t# use bmesh to get vertices - this accounts for deformations in depsgraph\n\t\t\tbm = bmesh.new()\n\t\t\tbm.from_object(mesh, depsgraph)\n\t\t\tbm.verts.ensure_lookup_table()\n\t\t\tmesh_verts = np.array([x.co for x in bm.verts])\n\t\t\tbm.free()\n\n\t\t\tif ref_frame == 'WORLD':\n\t\t\t\tmesh_verts = np.dot(mesh.matrix_world, np.vstack((mesh_verts.T, np.ones(mesh_verts.shape[0]))))\n\n\t\t\tverts.append(mesh_verts)\n\n\t\tverts = np.concatenate(verts, axis=1)\n\t\tverts /= verts[3] # convert from homogeneous coordinates\n\t\treturn verts[:3].T", "def faces(self):\n\n if self._faces is None:\n if self._vertices is None:\n return None\n self.triangulate()\n return self._faces", "def get_mesh_boundary(triangles):\n # Create edges and sort each vertices on each edge.\n edge0 = triangles[:,0:2]\n edge1 = triangles[:,1:3]\n edge2 = triangles.take((0,2), axis=1)\n edges = np.concatenate((edge0, edge1, edge2), axis=0)\n edge_sort = np.sort(edges, axis=1)\n\n # Get unique edges that are only present once.\n (uniq, uniq_ids, counts) = np.unique(edge_sort, axis=0, return_index=True, return_counts=True)\n edge_inds = np.arange(edge_sort.shape[0], dtype=int)\n outer_edge_ids = edge_inds[np.in1d(edge_inds, uniq_ids[counts==1])]\n outer_edges = edge_sort[outer_edge_ids,:]\n num_outer_edges = outer_edges.shape[0]\n\n # Assume we need to close the polygon.\n num_outer_verts = num_outer_edges + 1\n\n # Loop over outer edges and use traversal method to get ordered vertices.\n v_start = outer_edges[0,0]\n v_end = outer_edges[0,1]\n vert_inds = -1*np.ones(num_outer_verts, dtype=int)\n vert_inds[0] = v_start\n vert_inds[1] = v_end\n vert_num = 2\n outer_edges[0,:] = -1\n for edge_num in range(1,num_outer_edges):\n edge_inds_next = np.where(outer_edges == v_end)\n if (edge_inds_next[0].shape[0] < 1):\n msg = \"Next edge not found for vertex %d\" % v_end\n raise ValueError(msg)\n edge_ind_next = edge_inds_next[0][0]\n vert_ind_next = 0\n if (edge_inds_next[1][0] == 0):\n vert_ind_next = 1\n vert_inds[vert_num] = outer_edges[edge_ind_next, vert_ind_next]\n outer_edges[edge_ind_next, :] = -1\n v_end = vert_inds[vert_num]\n vert_num += 1\n\n return vert_inds", "def get_sound_vertices(self):\n\n vals = np.sum(np.abs(self.base_vectors), axis=1, keepdims=True)\n V = [[v,-v] for v in vals]\n combs = list(itertools.product(*V))\n\n vertices = []\n for cb in combs:\n vertices.append(self.base_vertices+np.array(cb))\n vertices = np.concatenate(vertices, axis=1)\n return vertices", "def get_vertices(self):\n return self.vertList.keys()", "def run(self, infected_graph):\n pos = nx.spring_layout(infected_graph)\n points = np.zeros((len(pos), 2))\n i = 0\n for p in pos:\n points[i] = pos[p]\n i += 1\n \n hull = ConvexHull(points)\n nodes = list(pos)\n return [nodes[p] for p in hull.vertices]", "def convex_hull(image):\n\n corners = find_corners(image)\n\n\n vertices = [corners[0]]\n\n for i in range(len(corners)):\n vertices.extend(\n _convex_hull_side(\n image, corners[i], corners[(i + 1) % len(corners)]))\n\n return vertices", "def exportTriangles(self):\n # Filter out triangles with any vertex in the extended BBox\n return [(a-4,b-4,c-4)\n for (a,b,c) in self.triangles if a > 3 and b > 3 and c > 3]", "def exportTriangles(self):\n # Filter out triangles with any vertex in the extended BBox\n return [(a-4, b-4, c-4)\n for (a, b, c) in self.triangles if a > 3 and b > 3 and c > 3]", "def verts(self):\n return self._xys[:-1]", "def 
get_vertices(self):\n return self._vertices", "def vertex_incidences(self):\n try:\n return self._vertex_incidences\n except AttributeError:\n self._vertex_incidences = \\\n [ [ v.index(), \n [h.index() for h in v.incident()] \n ] for v in self.Vrepresentation() ]\n return self._vertex_incidences", "def boundary_vertices(points: np.ndarray, triangles: np.ndarray) -> np.ndarray:\n tri = Triangulation(points[:, 0], points[:, 1], triangles)\n boundary_edges = set()\n for i, neighbors in enumerate(tri.neighbors):\n for k in range(3):\n if neighbors[k] == -1:\n boundary_edges.add((triangles[i, k], triangles[i, (k + 1) % 3]))\n edges = MultiLineString([points[edge, :] for edge in boundary_edges])\n polygons = list(polygonize(edges))\n assert len(polygons) == 1, polygons\n polygon = orient(polygons[0])\n points_list = [tuple(xy) for xy in points]\n indices = np.array([points_list.index(xy) for xy in polygon.exterior.coords])\n return indices[:-1]", "def get_vertices_list(feature):\n return [\n point\n for part in Geometry.get_multipolygon(feature)\n for ring in part\n for point in ring[0:-1]\n ]", "def get_vertices(self):\n return self.vertices", "def get_vertices(self):\n return self.vertices", "def GetBoundaryEdgesPent(self):\n\n p = self.InferPolynomialDegree()\n\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.edges,np.ndarray):\n if self.edges.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.edges.shape[1] == 2 and p > 1:\n pass\n else:\n return\n\n node_arranger = np.array([\n [0,1],\n [1,2],\n [2,3],\n [3,4],\n [4,0],\n ])\n\n # GET ALL EDGES FROM THE ELEMENT CONNECTIVITY\n all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],\n self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]],\n self.elements[:,node_arranger[4,:]]),axis=0).astype(np.uint64)\n\n # GET UNIQUE ROWS\n uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)\n\n # ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES\n freqs_inv = itemfreq(inv)\n edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]\n # NOT ARRANGED\n self.edges = uniques[edges_ext_flags,:]\n\n # DETERMINE WHICH FACE OF THE ELEMENT THEY ARE\n boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)\n\n # FURTHER RE-ARRANGEMENT / ARANGE THE NODES BASED ON THE ORDER THEY APPEAR\n # IN ELEMENT CONNECTIVITY\n # THIS STEP IS NOT NECESSARY INDEED - ITS JUST FOR RE-ARANGMENT OF EDGES\n all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)\n all_edges_in_edges = np.where(all_edges_in_edges==True)[0]\n\n boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]\n boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]\n\n # ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS\n self.edges = self.elements[boundary_edge_to_element[:,0][:,None],node_arranger[boundary_edge_to_element[:,1],:]]\n self.edges = self.edges.astype(np.uint64)\n self.boundary_edge_to_element = boundary_edge_to_element\n\n return self.edges", "def boundary_facet_ids(self,):\n return self.boundary_facet_ids_", "def vertices(self):\n top_exp = TopologyUtils.TopologyExplorer(self.topods_shape(), ignore_orientation=True)\n return map(Vertex, top_exp.vertices())", "def equatorial_zone_vertices(vertices, pole, width=5):\n return [i\n for i, v in enumerate(vertices)\n if np.abs(np.dot(v, pole)) < np.abs(np.sin(np.pi * width / 180))]", "def 
get_vertices(self):\n return list(self.vertices.keys())", "def calculate_box(vertices: [[float]]) -> [float]:\n x_coords = [x[0] for x in vertices]\n y_coords = [x[1] for x in vertices]\n z_coords = [x[2] for x in vertices]\n\n return [min(x_coords), min(y_coords), min(z_coords), max(x_coords), max(y_coords), max(z_coords)]", "def sides(self) -> Iterable[Face]:\n side_faces = []\n for i in range(0, len(self.bodies[0].faces)):\n if i != self._bottom_index and i != self._top_index:\n side_faces.append(self.bodies[0].faces[i])\n return side_faces", "def boundary_polygon_by_union(self):\n cell_geoms = [None]*self.Ncells()\n\n for i in self.valid_cell_iter():\n xy = self.nodes['x'][self.cell_to_nodes(i)]\n cell_geoms[i] = geometry.Polygon(xy)\n return ops.cascaded_union(cell_geoms)", "def vertices(self):\n return self.keys()", "def obtener_vertices(self):\n return list(self.vertices.keys())", "def real_boundaries(self):\n return (self._points[0][1], self._points[0][3])", "def GetEdgesHex(self):\n\n p = self.InferPolynomialDegree()\n\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.all_edges,np.ndarray):\n if self.all_edges.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.all_edges.shape[1] == 2 and p > 1:\n pass\n else:\n return self.all_edges\n\n\n # FIRST GET BOUNDARY FACES\n if not isinstance(self.all_faces,np.ndarray):\n self.GetFacesHex()\n\n # BUILD A 2D MESH\n tmesh = Mesh()\n # tmesh = deepcopy(self)\n tmesh.element_type = \"quad\"\n tmesh.elements = self.all_faces\n tmesh.nelem = tmesh.elements.shape[0]\n del tmesh.faces\n del tmesh.points\n\n # COMPUTE ALL EDGES\n self.all_edges = tmesh.GetEdgesQuad()\n return self.all_edges", "def edges_as_vertices(self) -> Iterable[Tuple[Vec3, Vec3]]:\n v = self.vertices\n for edge in self.edges:\n yield v[edge[0]], v[edge[1]]", "def vertices(self) -> list[Point]:\n a = Point(self.array[..., 0, :], copy=False)\n b = Point(self.array[..., 1, :], copy=False)\n return [a, b]", "def faces(self) -> Polygon:\n return Polygon(self.array, copy=False)", "def get_vertices(self):\n\n return self._vertices", "def GetBoundaryEdgesTet(self):\n\n p = self.InferPolynomialDegree()\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.edges,np.ndarray):\n if self.edges.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.edges.shape[1] == 2 and p > 1:\n pass\n else:\n return\n\n # FIRST GET BOUNDARY FACES\n if not isinstance(self.faces,np.ndarray):\n self.GetBoundaryFacesTet()\n\n # BUILD A 2D MESH\n tmesh = Mesh()\n tmesh.element_type = \"tri\"\n tmesh.elements = self.faces\n tmesh.nelem = tmesh.elements.shape[0]\n del tmesh.faces\n del tmesh.points\n\n # ALL THE EDGES CORRESPONDING TO THESE BOUNDARY FACES ARE BOUNDARY EDGES\n self.edges = tmesh.GetEdgesTri()", "def edgesWithVertices(self):\n\n return {e for e in self.edges if not (e.vert1 is None or e.vert2 is None)}", "def getContourRep(self):\n\t\tvertex1 = [[self.startX, self.startY]]\n\t\tvertex2 = [[self.startX, self.endY]]\n\t\tvertex3 = [[self.endX, self.startY]]\n\t\tvertex4 = [[self.endX, self.endY]]\n\t\tvertices = [vertex1, vertex2, vertex3, vertex4]\n\t\treturn convexHull(np.asarray(vertices, dtype = np.int32))", "def polygon_contains_holes(self, outer_poly):\n contain_list = []\n for hole_polygon in self.hole_list:\n if all(self.polygon_contains(outer_poly, hole_polygon)):\n contain_list.append(hole_polygon)\n return contain_list", "def edgify(vertices:list)->list:\n edges = []\n for k in range(0, len(vertices) - 1):\n 
edges.append([vertices[k], vertices[k + 1]])\n return edges", "def convex_hull(self):\n if self._faces is None:\n if self._vertices is None:\n return None\n self.triangulate()\n return self._convex_hull", "def GetInteriorFacesTet(self):\n\n if not isinstance(self.all_faces,np.ndarray):\n self.GetFacesTet()\n if not isinstance(self.faces,np.ndarray):\n self.GetBoundaryFacesTet()\n\n face_flags = in2d(self.all_faces.astype(self.faces.dtype),self.faces,consider_sort=True)\n face_flags[face_flags==True] = 1\n face_flags[face_flags==False] = 0\n interior_faces = self.all_faces[face_flags==False,:]\n\n return interior_faces, face_flags", "def boundary_nodes(G,nbunch):\n eboundary = nx.edge_boundary(nx.Graph(G),nbunch)\n nboundary = []\n for u,v in eboundary:\n if (u in nbunch) and (v not in nbunch):\n if u not in nboundary:\n # avoid duplicate entries\n nboundary.append(u)\n elif (u not in nbunch) and (v in nbunch):\n if v not in nboundary:\n # avoids duplicate entries\n nboundary.append(v)\n else:\n raise Exception(\"Error in edge boundary\")\n return nboundary", "def get_verts(problem):\n\tverts = []\n\tfor x in problem:\n\t\tfor element in x:\n\t\t\tif element not in verts:\n\t\t\t\tverts.append(element)\n\treturn verts", "def complementary_regions(self):\n g = self._get_puncturefinder_graph()\n # return g.connected_components()\n return list(nx.connected_components(g))", "def get_all_vertices(self):\r\n for vertex in self.__neighbours.keys():\r\n yield vertex", "def vertices(self):\r\n return list(self.__graph_dict.keys())", "def vertices(self):\n return list(self.graph_dict.keys())", "def get_1d_vertices(grid, cut_edges=False):\n\n if len(grid.shape) > 1:\n raise ValueError(\"grid must be 1d array.\")\n diff = np.diff(grid)\n vert = np.zeros(grid.size+1)\n # Interior vertices: halfway between points\n vert[1:-1] = grid[0:-1] + diff/2\n # Edge vertices: tight or reflect\n if cut_edges:\n vert[0] = grid[0]\n vert[-1] = grid[-1]\n else:\n vert[0] = grid[0] - diff[0]/2\n vert[-1] = grid[-1] + diff[-1]/2\n\n return vert", "def facet_with_holes(self,):\n return self.facet_with_holes_", "def get_vertices(self, crs=None):\n if crs is None:\n vertices = []\n for poly_vertices in self.vertices:\n vertices.append([np.array(v) for v in poly_vertices])\n return vertices\n else:\n vertices = []\n for poly_vertices in self.vertices:\n poly = []\n for ring_vertices in poly_vertices:\n poly.append(np.array([_reproject(v[:2], self.crs, crs)\n for v in ring_vertices]))\n vertices.append(poly)\n return vertices", "def vertices(self):\n return list(self.__graph_dict.keys())", "def vertices(self):\n return list(self.__graph_dict.keys())", "def vertices(self):\n return list(self.__graph_dict.keys())", "def find_open_edges_voronoi(graph, grid):\n edges = []\n for v in graph.ridge_vertices:\n p1 = graph.vertices[v[0]]\n p2 = graph.vertices[v[1]]\n cells = list(bresenham(int(p1[0]), int(p1[1]), int(p2[0]), int(p2[1])))\n hit = False\n\n for c in cells:\n # First check if we're off the map\n if np.amin(c) < 0 or c[0] >= grid.shape[0] or c[1] >= grid.shape[1]:\n hit = True\n break\n # Next check if we're in collision\n if grid[c[0], c[1]] == 1:\n hit = True\n break\n\n # If the edge does not hit on obstacle\n # add it to the list\n if not hit:\n # array to tuple for future graph creation step)\n p1 = (p1[0], p1[1])\n p2 = (p2[0], p2[1])\n edges.append((p1, p2))\n return edges", "def rook_neighbors_face(self, face):\n edges = self.cw_face_edges(face)\n return list(set([ self.left_region[edge] for edge in edges]))", 
"def get_vertices(self):\n return self.graph.keys()", "def computeBoundaries(dof_connectivity, dof_edges, dof_faces, bEdges, bFaces, Nord):\n # Number of boundaries on edges\n nBoundaryEdges = len(bEdges)\n num_dof_in_edge = Nord\n\n # Number of boundaries on faces\n nBoundaryFaces = len(bFaces)\n num_dof_in_face = Nord*(Nord-1)\n\n # Get boundary dofs for edges\n indx_boundary_edges = dof_edges[bEdges,:]\n\n # Get boundary dofs for faces\n if dof_faces.size == 0:\n # No dofs on faces (first order, Nord==1)\n indx_boundary_faces = np.zeros((1,0), dtype=np.int)\n else:\n indx_boundary_faces = dof_faces[bFaces,:]\n\n # Get indexes of boundary dofs\n tmp1 = np.reshape(indx_boundary_edges, (nBoundaryEdges*num_dof_in_edge))\n tmp2 = np.reshape(indx_boundary_faces, (nBoundaryFaces*num_dof_in_face))\n indx_boundary_dofs = np.hstack((tmp1, tmp2))\n\n # Get total number of dofs in the mesh\n total_num_dofs = np.max(dof_connectivity) + 1\n\n # Get indexes of inner dofs\n indx_inner_dofs = np.setdiff1d(np.arange(0,total_num_dofs), indx_boundary_dofs)\n\n return indx_inner_dofs, indx_boundary_dofs", "def boundary_edges_from_face_selection(bm):\n selected_faces = [f for f in bm.faces if f.select]\n all_edges = list({e for f in selected_faces for e in f.edges})\n edge_is_boundary = (\n lambda e: len({f for f in e.link_faces if f in selected_faces}) == 1\n )\n return [e for e in all_edges if edge_is_boundary(e)]", "def GetInteriorEdgesHex(self):\n\n if not isinstance(self.all_edges,np.ndarray):\n self.GetEdgesHex()\n if not isinstance(self.edges,np.ndarray):\n self.GetBoundaryEdgesHex()\n\n edge_flags = in2d(self.all_edges.astype(self.edges.dtype),self.edges,consider_sort=True)\n edge_flags[edge_flags==True] = 1\n edge_flags[edge_flags==False] = 0\n interior_edges = self.all_edges[edge_flags==False,:]\n self.interior_edges = interior_edges\n\n return interior_edges, edge_flags", "def no_non_adjacent_vertices(self):\n clauses = []\n for v in range(0,self.graph.num_vertices):\n non_neighbours = sorted(list(set(range(0,self.graph.num_vertices))\n - set([v])\n - set(self.graph.edges[v])))\n for nv in non_neighbours:\n for position in range(0,self.graph.num_vertices-1):\n clause = [ ClauseVariable(True,v,position),\n ClauseVariable(True,nv,position+1)]\n clauses.append(clause)\n return clauses", "def interior(self):\n return Shape(self - self.edge('inner'))", "def GetFacesHex(self):\n\n # DETERMINE DEGREE\n p = self.InferPolynomialDegree()\n\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.all_faces,np.ndarray):\n if self.all_faces.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.all_faces.shape[1] == 4 and p > 1:\n pass\n else:\n return self.all_faces\n\n node_arranger = NodeArrangementHex(p-1)[0]\n fsize = int((p+1)**3)\n\n # GET ALL FACES FROM THE ELEMENT CONNECTIVITY\n faces = np.concatenate((np.concatenate((\n np.concatenate((np.concatenate((np.concatenate((self.elements[:,node_arranger[0,:]],\n self.elements[:,node_arranger[1,:]]),axis=0),self.elements[:,node_arranger[2,:]]),axis=0),\n self.elements[:,node_arranger[3,:]]),axis=0),self.elements[:,node_arranger[4,:]]),axis=0),\n self.elements[:,node_arranger[5,:]]),axis=0).astype(np.int64)\n\n # REMOVE DUPLICATES\n self.all_faces, idx = unique2d(faces,consider_sort=True,order=False,return_index=True)\n\n face_to_element = np.zeros((self.all_faces.shape[0],2),np.int64)\n face_to_element[:,0] = idx % self.elements.shape[0]\n face_to_element[:,1] = idx // self.elements.shape[0]\n\n self.face_to_element = 
face_to_element\n\n return self.all_faces", "def GetBoundaryFacesTet(self):\n\n p = self.InferPolynomialDegree()\n\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.faces,np.ndarray):\n if self.faces.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.faces.shape[1] == 3 and p > 1:\n pass\n else:\n return\n\n node_arranger = NodeArrangementTet(p-1)[0]\n\n # CONCATENATE ALL THE FACES MADE FROM ELEMENTS\n all_faces = np.concatenate((self.elements[:,:3],self.elements[:,[0,1,3]],\n self.elements[:,[0,2,3]],self.elements[:,[1,2,3]]),axis=0)\n # GET UNIQUE ROWS\n uniques, idx, inv = unique2d(all_faces,consider_sort=True,order=False,return_index=True,return_inverse=True)\n\n # ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY FACES\n freqs_inv = itemfreq(inv)\n faces_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]\n # NOT ARRANGED\n self.faces = uniques[faces_ext_flags,:]\n\n # DETERMINE WHICH FACE OF THE ELEMENT THEY ARE\n boundary_face_to_element = np.zeros((faces_ext_flags.shape[0],2),dtype=np.int64)\n # THE FOLLOWING WILL COMPUTE FACES BASED ON SORTING AND NOT TAKING INTO ACCOUNT\n # THE ELEMENT CONNECTIVITY\n # boundary_face_to_element[:,0] = np.remainder(idx[faces_ext_flags],self.elements.shape[0])\n # boundary_face_to_element[:,1] = np.floor_divide(idx[faces_ext_flags],self.elements.shape[0])\n # OR EQUIVALENTLY\n # boundary_face_to_element[:,0] = idx[faces_ext_flags] % self.elements.shape[0]\n # boundary_face_to_element[:,1] = idx[faces_ext_flags] // self.elements.shape[0]\n\n # FURTHER RE-ARRANGEMENT / ARANGE THE NODES BASED ON THE ORDER THEY APPEAR\n # IN ELEMENT CONNECTIVITY\n # THIS STEP IS NOT NECESSARY INDEED - ITS JUST FOR RE-ARANGMENT OF FACES\n all_faces_in_faces = in2d(all_faces,self.faces,consider_sort=True)\n all_faces_in_faces = np.where(all_faces_in_faces==True)[0]\n\n # boundary_face_to_element = np.zeros((all_faces_in_faces.shape[0],2),dtype=np.int64)\n boundary_face_to_element[:,0] = all_faces_in_faces % self.elements.shape[0]\n boundary_face_to_element[:,1] = all_faces_in_faces // self.elements.shape[0]\n\n # ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS\n self.faces = self.elements[boundary_face_to_element[:,0][:,None],node_arranger[boundary_face_to_element[:,1],:]]\n self.faces = self.faces.astype(np.uint64)\n self.boundary_face_to_element = boundary_face_to_element", "def GetBoundaryLoops(self):\n\n self.__do_essential_memebers_exist__()\n\n if self.InferBoundaryElementType() != \"line\":\n raise NotImplementedError(\"Computing boundary loops is only supported for tri/quad meshes for now\")\n\n self.GetEdges()\n\n # First create a node to neighbour map i.e. 
node as key and its two neighbouring nodes as value\n nodeToNeighboursMap = dict()\n for i in range(self.edges.shape[0]):\n\n if self.edges[i,0] not in nodeToNeighboursMap:\n nodeToNeighboursMap[self.edges[i,0]] = [self.edges[i,1],-1]\n else:\n nodeToNeighboursMap[self.edges[i,0]][1] = self.edges[i,1]\n\n if self.edges[i,1] not in nodeToNeighboursMap:\n nodeToNeighboursMap[self.edges[i,1]] = [self.edges[i,0],-1]\n else:\n nodeToNeighboursMap[self.edges[i,1]][1] = self.edges[i,0]\n\n # Now create a vector of face loops\n faceLoops = []\n while nodeToNeighboursMap:\n # Insert the first node from node to edge map and its two neighbours in order and erase it from the map\n faceLoop = []\n mapBegin = next(iter(nodeToNeighboursMap))\n faceLoop.append(nodeToNeighboursMap[mapBegin][0])\n faceLoop.append(mapBegin)\n faceLoop.append(nodeToNeighboursMap[mapBegin][1])\n nodeToNeighboursMap.pop(mapBegin, None)\n\n while True:\n # Pick the last node in the current face loop and find its neighbours\n if faceLoop[-1] in nodeToNeighboursMap:\n tmp = faceLoop[-1]\n mapIter = nodeToNeighboursMap[faceLoop[-1]]\n # Check if we have not reached the end of the loop i.e. the first element\n if mapIter[0] != faceLoop[0] and mapIter[1] != faceLoop[0]:\n if mapIter[0] == faceLoop[-2]:\n faceLoop.append(mapIter[1])\n elif mapIter[1] == faceLoop[-2]:\n faceLoop.append(mapIter[0])\n else:\n nodeToNeighboursMap.pop(faceLoop[0], None)\n\n nodeToNeighboursMap.pop(tmp, None)\n else:\n faceLoop = np.array(faceLoop)\n faceLoops.append(faceLoop)\n break\n\n return faceLoops", "def get_raw_bounds(self) -> [Vector, Vector]:\n\t\tverts = np.array([v.co for mesh in self._meshes for v in mesh.data.vertices])\n\t\tbbox_min = Vector([*np.min(verts, axis=0)])\n\t\tbbox_max = Vector([*np.max(verts, axis=0)])\n\t\treturn bbox_min, bbox_max" ]
[ "0.6706478", "0.6694034", "0.6630309", "0.6623699", "0.66021216", "0.65841985", "0.6554914", "0.64516014", "0.6362444", "0.63408965", "0.6277447", "0.62755173", "0.61893046", "0.6162313", "0.6147987", "0.6136316", "0.612656", "0.611977", "0.61181563", "0.610831", "0.60700697", "0.60514164", "0.6050791", "0.60379523", "0.6012995", "0.60123616", "0.59998924", "0.59840435", "0.5983638", "0.59547126", "0.5950867", "0.59497195", "0.5946131", "0.59277135", "0.5923645", "0.5918951", "0.5894998", "0.5891031", "0.58857465", "0.588565", "0.5883016", "0.58815306", "0.5880462", "0.58771294", "0.58369046", "0.5814571", "0.5813578", "0.5809688", "0.5806052", "0.57907784", "0.5787802", "0.57846385", "0.57846385", "0.577803", "0.57766837", "0.57760555", "0.57734275", "0.57724667", "0.57658225", "0.57626414", "0.5758818", "0.5753384", "0.5751088", "0.5741589", "0.5741306", "0.5734224", "0.57221365", "0.5716226", "0.5710236", "0.56952894", "0.5688063", "0.5676668", "0.56723934", "0.5671048", "0.5668207", "0.5664528", "0.56613356", "0.5658971", "0.5653115", "0.5647465", "0.56338584", "0.56249946", "0.5619586", "0.5618682", "0.5608694", "0.56038815", "0.56038815", "0.56038815", "0.5600741", "0.5595687", "0.5593442", "0.5590892", "0.55858135", "0.5585259", "0.5583203", "0.5582944", "0.5582843", "0.5579941", "0.557377", "0.5564451" ]
0.73313373
0
Return the dual of the current DCEL.
def dual(self):
        def set_twins():
            for edge_idx in range(0, len(dual_dcel.edges), 2):
                dual_dcel.edges[edge_idx].twin = dual_dcel.edges[edge_idx + 1]
                dual_dcel.edges[edge_idx + 1].twin = dual_dcel.edges[edge_idx]

        def set_next_and_previous():
            for face in dual_dcel.faces:
                face_edges = [edge for edge in dual_dcel.edges if edge.incident_face == face]
                for edge in face_edges:
                    if(not edge.get_destination().is_infinity()):
                        edge.nxt = [e for e in face_edges if e.origin == edge.get_destination()][0]
                    if(not edge.origin.is_infinity()):
                        edge.prev = [e for e in face_edges if edge.origin == e.get_destination()][0]

        dual_dcel = DCEL()
        for edge in self.edges:
            incident_face = dual_dcel.add_face(Face(circumcentre=edge.twin.origin.as_points()))
            origin = dual_dcel.add_vertex(Vertex(coordinates=edge.incident_face.circumcentre))
            dual_edge = HalfEdge(
                origin=origin,
                incident_face=incident_face
            )
            incident_face.outer_component = dual_edge
            origin.incident_edge = dual_edge
            dual_dcel.edges.append(dual_edge)
        set_twins()
        set_next_and_previous()
        return dual_dcel
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dual(self):\n return dual_array(self)", "def getdualobj(self,whichsol_):\n dualobj_ = ctypes.c_double()\n res = __library__.MSK_XX_getdualobj(self.__nativep,whichsol_,ctypes.byref(dualobj_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n dualobj_ = dualobj_.value\n _dualobj_return_value = dualobj_\n return (_dualobj_return_value)", "def dual_objective(self, dual_coeffs):\n primal = self.model._sdca_primal_dual_relation(self.l_l2sq,\n dual_coeffs)\n prox_l2_value = 0.5 * self.l_l2sq * np.linalg.norm(primal) ** 2\n return self.model.dual_loss(dual_coeffs) - prox_l2_value", "def dualGrid(self):\n return self._dual_grid( )", "def exterior_der(self):\n from utilities import format_unop_txt, format_unop_latex\n if self._exterior_derivative is None:\n vmodule = self._vmodule # shortcut\n rname = format_unop_txt('d', self._name)\n rlname = format_unop_latex(r'\\mathrm{d}', self._latex_name)\n resu = vmodule.alternating_form(self._tensor_rank+1, name=rname, \n latex_name=rlname)\n for dom, rst in self._restrictions.iteritems():\n resu._restrictions[dom] = rst.exterior_der()\n self._exterior_derivative = resu\n return self._exterior_derivative", "def double(self):\n return self._double", "def gen_dual_func(self):\n if 0 in self.sig:\n # We are degenerate, use the right complement\n return self.right_complement_func\n else:\n Iinv = self.pseudoScalar.inv().value\n gmt_func = self.gmt_func\n @numba.njit\n def dual_func(Xval):\n return gmt_func(Xval, Iinv)\n return dual_func", "def d2(self):\n d1 = self.d1()\n return d1 - self.sigma * (self.t **(0.5))", "def dual(self, I=None) -> 'MultiVector':\n if I is None:\n return self.layout.MultiVector(value=self.layout.dual_func(self.value))\n else:\n Iinv = I.inv()\n\n return self * Iinv", "def getD(self):\r\n return self.D", "def disagreement(self):\n return 0.5*(np.dot(np.dot(np.transpose(self.x),self.L),self.x)).item(0)", "def derivative ( self ):\n return self.__derivative", "def get_dual_val(self, var_name, pos):\n val = self.get_other_value(self.dual_var, var_name, pos)\n if not self.pdv_to_csv: # if not saved to csv file\n return val\n else: # otherwise, we should get the file path and read from the file to array or mat\n f_path = os.path.join(self.root_dir, 'dual_vars', var_name, str(val) + '.csv')\n df = pd.read_csv(f_path, header = None) # first read csv file into a pandas data frame and then transform\n return np.asmatrix(df.values)", "def diffuse_coefficient(self):\n return self._diffuse_coefficient", "def dual(self):\n letter = self.letter()\n # the self-dual cases\n if letter != 'BC' and letter[0] in ['B','C']:\n if letter == 'BB': letter = 'CC'\n elif letter == 'CC': letter = 'BB'\n elif letter[0] == 'B': letter = 'C' + letter[1:]\n elif letter[0] == 'C': letter = 'B' + letter[1:]\n rank = self._rank\n if self.is_affine():\n rank -= 1\n twist = self._twist\n return QuiverMutationType(letter,rank,twist)\n # the cases F and G have non-trivial duality in some cases\n elif letter in ['F','G']:\n if self.is_finite(): return self\n elif self.is_affine():\n rank = self._rank - 1\n twist = - self._twist\n elif self.is_elliptic():\n twist = self._twist\n rank = self._rank - 2\n if letter == 'F':\n if self._twist == [2,2]:\n twist == [1,1]\n if self._twist == [1,1]:\n twist == [2,2]\n if letter == 'G':\n if self._twist == [3,3]:\n twist = [1,1]\n elif self._twist == [1,1]:\n twist = [3,3]\n else: rank = self._rank\n return QuiverMutationType(letter,rank,twist)\n else:\n return self", "def d2(self):\r\n return 
self.d1() - self.sigma*self.t**0.5", "def DOR(self):\n a, c, d, b = self.to_ccw()\n ad, bc = a * d, b * c\n return _div(ad, bc)", "def getdualobj(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getdualobj(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _dualobj_return_value = resargs\n return _dualobj_return_value", "def valuation(self):\n\t\tif self.__tete:\n\t\t\treturn self.__tete.plus_grand().get_coefficient()\n\t\telse:\n\t\t\treturn rationnel()", "def getDouble(self, int: int, int2: int) -> float:\n ...", "def D(self):\n if not hasattr(self, '_D'):\n self.logger.warning('The differential operator G.D is not '\n 'available, we need to compute it. Explicitly '\n 'call G.compute_differential_operator() '\n 'once beforehand to suppress the warning.')\n self.compute_differential_operator()\n return self._D", "def dualy(self, arg, **kwargs):\n ax = self.alty(**kwargs)\n self._dualy_arg = arg\n self._dualy_overrides()\n return ax", "def dual_k_Schur(self):\n return DualkSchurFunctions(self)", "def ddalf(x):\n return dalf_spl.derivatives(x)[1]", "def get_linear_dispersion(self):\n if \"DX\" not in self._results_df or \"DY\" not in self._results_df:\n self.calc_linear_dispersion()\n return self._results_df.loc[:, [\"S\", \"DX\", \"DY\"]]", "def activate_der(self):\r\n\t\treturn self.value * (1 - self.value)", "def _der(self, x):\n y, dydx = self._evalAndDer(x)\n return dydx # Sadly, this is the fastest / most convenient way...", "def _der(self, x):\n y, dydx = self._evalAndDer(x)\n return dydx # Sadly, this is the fastest / most convenient way...", "def get_bessel_derivative(self):\n return np.array([t.der_bessel for t in self._trc])", "def getDensityLaw(self):\n return self.densityLaw", "def get_double_power_law(self, alpha, beta, M_star):\n\t\tdenom = 10.0**(0.4*(alpha + 1.0)*(self.M_grid - M_star))\n\t\tdenom += 10.0**(0.4*(beta + 1.0)*(self.M_grid - M_star))\n\t\tdn = 1.0/denom\n\t\tdn /= np.sum(dn)\n\t\treturn dn", "def ddalf(x):\n return dalf_spl.derivatives(x)[1]", "def darcy_func(self):\n i, o = self.inl[0].to_flow(), self.outl[0].to_flow()\n\n if abs(i[0]) < 1e-4:\n return i[1] - o[1]\n\n visc_i = visc_mix_ph(i, T0=self.inl[0].T.val_SI)\n visc_o = visc_mix_ph(o, T0=self.outl[0].T.val_SI)\n v_i = v_mix_ph(i, T0=self.inl[0].T.val_SI)\n v_o = v_mix_ph(o, T0=self.outl[0].T.val_SI)\n\n re = 4 * abs(i[0]) / (np.pi * self.D.val * (visc_i + visc_o) / 2)\n\n return ((i[1] - o[1]) - 8 * abs(i[0]) * i[0] * (v_i + v_o) / 2 *\n self.L.val * lamb(re, self.ks.val, self.D.val) /\n (np.pi ** 2 * self.D.val ** 5))", "def _der(self, x):\n return self._evalOrDer(x, False, True)[0]", "def r_d(self, tl):\n\t return self.RD0*exp(self.HKR/(R*self.TO)*(1. 
- self.TO/tl))", "def get_dual_change_value(self, var_change_name, pos):\n return self.get_other_value(self.dual_var_change, var_change_name, pos)", "def get_linearEvolving(self):\n return self.get_linearEvolvingEigen()", "def get_2nd_derivative(self, output_name, wrt):\n \n return self.hessian[wrt[0]][wrt[1]][output_name]", "def differentiate(self): \n if not self.is_nan() :\n return RatTerm(RatNum(self.coeff * self.expt), (self.expt - 1))\n else :\n return RatTerm(RatNum(1, 0), 0)", "def _derivativeTerm(self):\n\n\t\treturn self._Kd * (self._getErrorFunction() - self._previousError) / self._dt", "def _dvolume_dR(self):\n\n dvdr = 0.\n\n return dvdr", "def get(self, swap=False):\n if swap:\n return self.get_as_abL()\n else:\n return self.Lab", "def _dualize(self, block, unfixed=[]):\n #\n # Collect linear terms from the block\n #\n A, b_coef, c_rhs, c_sense, d_sense, vnames, cnames, v_domain = collect_linear_terms(block, unfixed)\n #\n # Construct the block\n #\n if isinstance(block, Model):\n dual = ConcreteModel()\n else:\n dual = Block()\n for v, is_indexed in vnames:\n if is_indexed:\n setattr(dual, v+'_Index', Set(dimen=None))\n setattr(dual, v, Var(getattr(dual, v+'_Index')))\n else:\n setattr(dual, v, Var())\n for cname, is_indexed in cnames:\n if is_indexed:\n setattr(dual, cname+'_Index', Set(dimen=None))\n setattr(dual, cname, Constraint(getattr(dual, cname+'_Index')))\n setattr(dual, cname+'_lower_', Var(getattr(dual, cname+'_Index')))\n setattr(dual, cname+'_upper_', Var(getattr(dual, cname+'_Index')))\n else:\n setattr(dual, cname, Constraint())\n setattr(dual, cname+'_lower_', Var())\n setattr(dual, cname+'_upper_', Var())\n dual.construct()\n #\n # Add variables\n #\n # TODO: revisit this hack. We shouldn't be calling\n # _getitem_when_not_present()\n #\n for name, ndx in b_coef:\n v = getattr(dual, name)\n if not ndx in v:\n v._getitem_when_not_present(ndx)\n #\n # Construct the objective\n #\n if d_sense == minimize:\n dual.o = Objective(expr=sum(- b_coef[name,ndx]*getattr(dual,name)[ndx] for name,ndx in b_coef), sense=d_sense)\n else:\n dual.o = Objective(expr=sum(b_coef[name,ndx]*getattr(dual,name)[ndx] for name,ndx in b_coef), sense=d_sense)\n #\n # Construct the constraints\n #\n for cname in A:\n c = getattr(dual, cname)\n c_index = getattr(dual, cname+\"_Index\") if c.is_indexed() else None\n for ndx,terms in iteritems(A[cname]):\n if not c_index is None and not ndx in c_index:\n c_index.add(ndx)\n expr = 0\n for term in terms:\n v = getattr(dual,term.var)\n if not term.ndx in v:\n v.add(term.ndx)\n expr += term.coef * v[term.ndx]\n if not (cname, ndx) in c_rhs:\n c_rhs[cname, ndx] = 0.0\n if c_sense[cname,ndx] == 'e':\n c.add(ndx, expr - c_rhs[cname,ndx] == 0)\n elif c_sense[cname,ndx] == 'l':\n c.add(ndx, expr - c_rhs[cname,ndx] <= 0)\n else:\n c.add(ndx, expr - c_rhs[cname,ndx] >= 0)\n for (name, ndx), domain in iteritems(v_domain):\n v = getattr(dual, name)\n flag = type(ndx) is tuple and (ndx[-1] == 'lb' or ndx[-1] == 'ub')\n if domain == 1:\n if flag:\n v[ndx].domain = NonNegativeReals\n else:\n v.domain = NonNegativeReals\n elif domain == -1:\n if flag:\n v[ndx].domain = NonPositiveReals\n else:\n v.domain = NonPositiveReals\n else:\n if flag:\n # TODO: verify that this case is possible\n v[ndx].domain = Reals\n else:\n v.domain = Reals\n return dual", "def getLeverage(self,LeftTup,RightTup):\n tup=LeftTup+RightTup\n _nom=self.getSupport(tup)\n _den=self.getSupport(LeftTup) * self.getSupport(RightTup)\n _leverage=_nom - _den\n return (_leverage)", "def 
price_diff_d(self):\n try:\n return(self.direction*(self.price_close - self.price_open))\n except:\n return", "def price_diff_rel_d(self): \n try:\n return(self.price_diff_d / self.price_open)\n except:\n return", "def get_voltage(self):\n self._raise_not_implemented()", "def dfr(self):\n return self.table[1, 0] / (self.table[1, 0] + self.table[1, 1])", "def differential(self):\n return self._differential", "def __get_deccelleration(self):\n slowdown_span = (4.0/ 5.0) * (self.safe_distance - self.critical_distance)\n return (self.speed ** 2.0) / (2.0 * slowdown_span)", "def dgdy(self, X):\n \n return 3*X[1]**2", "def _rtol(self):\n return self.__class__.RTOL", "def dsdlogdp(self):\n return self.dndlogdp.mul(self.s_multiplier)", "def getValue(self):\n return DPxGetDinValue()", "def dVdx(self, sys):\n return self._dfdx_fcn(self.pes1.dVdx(sys), self.pes2.dVdx(sys))", "def inner_rad(self) -> Quantity:\n return self._inner_rad", "def fangle_degr(self):\r\n\r\n return self._versor_1.angle_degr(self._versor_2)", "def Drep(self):\n sinE = np.sin(self.E())\n cosE = np.cos(self.E())\n return -self.alpha()*sinE+(self.beta()+self.GAMMA)*cosE", "def compute_dual(lattice, row_wise: bool = True):\n lat = to_row_wise(lattice, row_wise)\n d = np.linalg.inv(lat @ lat.T) @ lat\n if not row_wise:\n d = d.T\n return d", "def getValue(self):\n return self.__diastolic", "def getExponentAsDouble(self):\n return _libsbml.Unit_getExponentAsDouble(self)", "def lsd(self):\n return self._lsd", "def dual(self):\n comps = self.irreducible_components()\n return QuiverMutationType( [comp.dual() for comp in comps ] )", "def reference_voltage(self) -> float:\n return self._ref_voltage", "def value(self): \r\n c = self.nd1() * self.s * math.exp(-self.div * self.t)\r\n c -= self.nd2() * self.x * math.exp(-self.rf * self.t)\r\n \r\n return c", "def get_van_Der_Waals_radius(self):\n return self.van_Der_Waals_radius", "def Dre(self):\n er = self.er()\n sinE = np.sin(self.E())\n cosE = np.cos(self.E())\n return self.alpha()*(cosE- er)+ \\\n (self.beta()+self.GAMMA)*sinE", "def read_double(self):\n return self._packers[\"d\"].unpack(self.read(8))[0]", "def loevinger_coeff(self):\n a, c, d, b = self.to_ccw()\n p1, q1 = a + b, c + d\n p2, q2 = a + c, b + d\n n = p1 + q1\n\n cov = self.covar()\n\n if n == 0:\n return np.nan\n elif a == n or d == n:\n # only one (diagonal) cell is non-zero\n return 0.5\n elif cov == 0.0:\n return 0.0\n else:\n return _div(cov, min(p1 * q2, p2 * q1))", "def get_voltage(self):\n return self.environment.get_voltage(self.neuron_id)", "def getRHS(self, freq):\n\n # Get sources for the frequncy(polarizations)\n Src = self.survey.getSrcByFreq(freq)[0]\n S_e = Src.S_e(self)\n return -1j * omega(freq) * S_e", "def _dy(self, T):\n return self._h(np.diff(T)) * self._a / self._m / self._c * np.diff(T) * np.array([1, -1])", "def discharge_coefficient(self) -> _VectorisedFloat:\n return 0.6", "def compute_differential_operator(self):\n\n v_in, v_out, weights = self.get_edge_list()\n\n n = len(v_in)\n Dr = np.concatenate((np.arange(n), np.arange(n)))\n Dc = np.empty(2*n)\n Dc[:n] = v_in\n Dc[n:] = v_out\n Dv = np.empty(2*n)\n\n if self.lap_type == 'combinatorial':\n Dv[:n] = np.sqrt(weights)\n Dv[n:] = -Dv[:n]\n elif self.lap_type == 'normalized':\n Dv[:n] = np.sqrt(weights / self.dw[v_in])\n Dv[n:] = -np.sqrt(weights / self.dw[v_out])\n else:\n raise ValueError('Unknown lap_type {}'.format(self.lap_type))\n\n self._D = sparse.csc_matrix((Dv, (Dr, Dc)), shape=(n, self.N))", "def x2(self):\n return self._x2", "def 
dVdx(self, sys):\n dx2 = sys.positions * sys.positions - self.x0 * self.x0\n return 4 * self.A * sys.positions * dx2", "def getDoubleValue(self):\n return _libsbml.ConversionOption_getDoubleValue(self)", "def getMath(self):\n return _libsbml.Constraint_getMath(self)", "def double_value(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"double_value\")", "def GetRedistributionValue(Md):\n X = Md.SteadyState()\n Mom = Md.IP.get_Moments(Md.ubar,Md.ubar,Md.tau)\n Ealpha1taulogalpha = (1-delta)*X[iEAlph]*Mom[3]/(1-(1-delta)*Mom[0])\n return (-X[iElogAlpha]+Ealpha1taulogalpha/X[iEAlph] )/(1-beta)", "def dVdx(self, sys):\n dx = sys.positions - self.x0\n k = self.omega*self.omega*sys.mass\n return self.A*k*dx", "def get_dx(self):\n return self.__dx", "def discharge_coefficient(self) -> _VectorisedFloat:\n window_ratio = np.array(self.window_width / self.window_height)\n coefs = np.empty(window_ratio.shape + (2, ), dtype=np.float64)\n\n coefs[window_ratio < 0.5] = (0.06, 0.612)\n coefs[np.bitwise_and(0.5 <= window_ratio, window_ratio < 1)] = (0.048, 0.589)\n coefs[np.bitwise_and(1 <= window_ratio, window_ratio < 2)] = (0.04, 0.563)\n coefs[window_ratio >= 2] = (0.038, 0.548)\n M, cd_max = coefs.T\n\n window_angle = 2.*np.rad2deg(np.arcsin(self.opening_length/(2.*self.window_height)))\n return cd_max*(1-np.exp(-M*window_angle))", "def get_derivative(self, output_name, wrt):\n \n return self.gradient[wrt][output_name]", "def __call__(self, x):\n\n self.dbeads.q = x\n e = self.dforces.pot # Energy\n g = -self.dforces.f # Gradient\n\n return e, g", "def df2dx5_func(self,X):\n result = (\n -self.rj*self.rm*self.k_spr*self.b_spr * (\n np.exp(self.b_spr*(self.rm*X[4] + self.rj*X[0]))\n * ((self.rm*X[4] + self.rj*X[0])>=0)\n ) / self.Ij\n )\n return(result)", "def get_double_mode(self):\r\n msg = struct.pack('>2B', 56, 1)\r\n response = self.query(msg)\r\n if response[1] == 254:\r\n return 'Subtractive mode selected.'\r\n elif response[1] == 1:\r\n return 'Additive mode selected.'\r\n else:\r\n raise ValueError('Mode not recognised.')", "def get_library(self):\r\n\r\n normal_terms = self.normal_term.get_normal_terms()\r\n derivative_terms = self.derivative_term.get_derivative_terms()\r\n\r\n library = library_matrix_mat(normal_terms, derivative_terms)\r\n\r\n return library", "def get_vsolar(self):\n return self.read_register(4098, 1, 3)", "def dcpl(self):\n # easy enough\n return self._dataset._pyre_id.dcpl", "def second_derivative(x, y, finitediff_scheme = 'central'):\n if finitediff_scheme == 'central':\n y = [0] + list(y) + [0]\n numerator = np.array([y[i-1] - 2*y[i] + y[i+1] for i in range(1, len(y)-1)])\n denominator = (x[1]-x[0])**2\n return numerator/denominator \n \n elif finitediff_scheme == 'five point stencil':\n y = [0, 0] + list(y) + [0, 0]\n numerator = np.array([-y[i-2] + 16*y[i-1] - 30*y[i] + 16*y[i+1] - y[i+2] for i in range(2, len(y)-2)])\n denominator = 12*(x[1] - x[0])**2 \n return numerator/denominator", "def double(self, number):\n return 2 * number", "def math(self):\n return self.__math", "def d(self, combo_name='Combo 1'):\n\n # Calculate and return the local displacement vector\n return matmul(self.T(), self.D(combo_name))", "def compute_dual_line(P):\n return Line(P.x, -P.y)", "def Drepp(self):\n sinE = np.sin(self.E())\n cosE = np.cos(self.E())\n return -self.alpha()*cosE-(self.beta()+self.GAMMA)*sinE", "def radial2(self) -> float:\n return self.distortion_coefficients[0]", "def from_dual(self):\n return \"\"", "def rref_den(self, *, method='auto', 
keep_domain=True):\n return _dm_rref_den(self, method=method, keep_domain=keep_domain)", "def discriminant(self):\r\n return self.__b**2 - (4 * self.__a * self.__c)" ]
[ "0.68338317", "0.6423258", "0.5941094", "0.5921593", "0.57853687", "0.57633495", "0.5762203", "0.57469624", "0.5682356", "0.5682002", "0.5639214", "0.5613858", "0.55889916", "0.55681217", "0.5535743", "0.5530557", "0.55077", "0.54985017", "0.5457592", "0.54481125", "0.5441906", "0.54185843", "0.54157156", "0.5402935", "0.5397527", "0.5395019", "0.53673464", "0.53673464", "0.5318552", "0.5295986", "0.5295362", "0.5291567", "0.5275379", "0.52709615", "0.52609605", "0.52464855", "0.5236956", "0.5211155", "0.5199178", "0.51955837", "0.51953065", "0.5193646", "0.5184661", "0.51823163", "0.51732075", "0.5163423", "0.5155392", "0.5145551", "0.5132707", "0.5126593", "0.5125853", "0.5118463", "0.51146215", "0.511279", "0.5112368", "0.5099324", "0.50936145", "0.5086796", "0.5082347", "0.5068198", "0.5062056", "0.5047683", "0.5041909", "0.50340825", "0.50323194", "0.50317", "0.5029385", "0.50284225", "0.5011958", "0.5010526", "0.5003101", "0.49949816", "0.4984847", "0.49839613", "0.4983579", "0.49804473", "0.4967461", "0.49668983", "0.49668005", "0.4959398", "0.49570554", "0.49544296", "0.49519864", "0.4941775", "0.4941569", "0.49387416", "0.49365473", "0.4935139", "0.49344242", "0.49334443", "0.4930562", "0.4930437", "0.49275684", "0.49270517", "0.49183542", "0.49134526", "0.4910682", "0.49093395", "0.49079818", "0.49079365" ]
0.6778644
1
Print-friendly representation of the DCEL object.
def __repr__(self): return ( '<DCEL (' 'vertices:\n {obj.vertices},\n' 'edges:\n {obj.edges},\n' 'faces:\n {obj.faces}>'.format(obj=self) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __repr__(self):\n cls = self.__class__.__name__\n return '%s(%s)' % (cls, repr(self.d))", "def __str__(self):\n result=\"curv %f d0 %f z0 %f ctheta %f phi %f barcode %d\"%(self.curv,self.d0,self.z0,self.ctheta,self.phi,self.barcode)\n return result", "def printObj(self):\n return 'patient_id:{}, medication:{}, frequency:{}, start_dt:{},'\n 'end_dt:{}, noti_type:{}'.format(\n self.patients.data,\n self.medication.data,\n self.frequency.data,\n self.start_dt,\n self.end_dt.data,\n self.noti_type.data)", "def output(self):\n to_write = 'C '\n \"\"\"\n print self.def_field\n for key in self.def_field:\n print key,\"=\", self.def_field[key]\n \"\"\"\n to_write += str(self.offset[0] + self.def_field['x'])+' '\n to_write += str(self.offset[1] + self.def_field['y'])+' '\n to_write += str(self.def_field['radius'])+' '\n to_write += str(self.def_field['part'])+' '\n to_write += str(self.def_field['dmg'])+' '\n to_write += str(self.def_field['pen'])+' '\n to_write += self.def_field['fill']+'\\n'\n return to_write", "def __str__(self):\n from nodepy.utils import array2strings\n\n c = array2strings(self.c,printzeros=True)\n A = array2strings(self.A)\n b = array2strings(self.b,printzeros=True)\n lenmax, colmax = _get_column_widths([A,b,c])\n\n s=self.name+'\\n'+self.info+'\\n'\n for i in range(len(self)):\n s+=c[i].ljust(colmax+1)+'|'\n for j in range(len(self)):\n s+=A[i,j].ljust(colmax+1)\n s=s.rstrip()+'\\n'\n s+='_'*(colmax+1)+'|'+('_'*(colmax+1)*len(self))+'\\n'\n s+= ' '*(colmax+1)+'|'\n for j in range(len(self)):\n s+=b[j].ljust(colmax+1)\n return s.rstrip()", "def __repr__(self):\n return self.pretty_print(self.__dict__)", "def __repr__(self):\n values = ', '.join(f'{k}={v}' for k, v in self.variables.items())\n return f'D({values})'", "def __str__(self):\n txt = \"%s:\\n\" % self.name\n txt += \" Charge: %.4f\\n\" % self.charge\n txt += \" Radius: %.4f\" % self.radius\n return txt", "def __str__(self):\n txt = ''\n if self.PrintHeader:\n txt = \" |\" + \"|\".join(sorted(self.rows[0].keys())).expandtabs() + \"|\"\n txt += \"\\n\"\n txt += \"|-\"\n for r in self.rows:\n txt += \"\\n|\"\n txt += \"|\".join([str(uround(r[key] , 2) if isinstance(r[key], (int, long, float, complex , Variable,AffineScalarFunc )) else r[key]) for key in sorted(self.rows[0].keys())]) + \"|\"\n txt += \"\\n|-\"\n if self.PrintSum:\n txt += \"\\n\"\n sumRow = self.GetSumRow()\n txt += \"| |\" + \"|\".join( [str(uround(sumRow[key] , 2) if isinstance(sumRow[key], (int, long, float, complex , Variable ,AffineScalarFunc )) else sumRow[key]) for key in sorted(self.rows[0].keys())[1:]] ) + \"|\"\n\n return txt", "def __repr__(self):\n\n return '%s(%r, %r, %s)' % (\n self, self.lod, self.cvs_path, self._format_entries(),\n )", "def __str__(self):\n datastr = self.f_val_to_str()\n return_string = \"%s %s\" % (self.f_get_class_name(), self.v_full_name)\n if self.v_comment:\n return_string += \" (`%s`)\" % self.v_comment\n if datastr:\n return_string += \": \" + datastr\n\n return return_string", "def __str__(self):\n\n string = \"values:\\n\\t\"\n string += \" x \".join(map(str, self.shape))\n\n string += \" {} ({})\\n\".format(type(self.values).__name__, self.values.dtype)\n\n if self.print_values is True:\n string += str(self.values) + \"\\n\"\n\n string += \"dims:\\n\\t\"\n\n string += \"{}\\n\".format(self.dims)\n\n string += \"coords:\\n\\t\"\n string += \"\\n\\t\".join(map(repr, self.coords))\n\n string += \"\\n\"\n\n string += \"attrs:\\n\"\n\n for ix, key in enumerate(self.attrs.keys()):\n if ix == 
self.max_print_attrs:\n string += \"\\t+%i attrs\" % (len(self.attrs) - self.max_print_attrs)\n break\n string += \"\\t{!r}: {!r}\\n\".format(key, self.attrs[key])\n\n return string", "def __repr__(self) -> str:\n\n thresh = np.get_printoptions()[\"threshold\"]\n np.set_printoptions(threshold=20)\n extra_chars = len(self.__class__.__name__)\n arr_str = \"data=\" + str(self.data).replace(\"\\n\", \"\\n\" + \" \" * (extra_chars + 6))\n shape_str = (\n \" \" * extra_chars\n + \" shape=\"\n + str(self.shape).replace(\"\\n\", \"\\n\" + \" \" * (extra_chars + 7))\n )\n dtype_str = \" \" * extra_chars + \" dtype=\" + str(self.dtype)\n np.set_printoptions(threshold=thresh)\n return \"{klass}({data},\\n{shape},\\n{dtype})\".format(\n klass=self.__class__.__name__,\n data=arr_str,\n shape=shape_str,\n dtype=dtype_str,\n )", "def __str__(self):\n print_info = f\"\\nStudent ID: {self._id}, Name: {self._name}, \" \\\n f\"Year: {self._year} \\nPhone: {str(self._phone)}, \" \\\n f\"Address: {str(self._address)} \" \\\n f\"\\nClasses: {str(self._classes)}\" \\\n f\"\\nBirth Date: {self._date}\"\n return print_info", "def __str__(self):\n return f\"{self.__class__.__name__}:\\n{self._axl_data}\"", "def __str__(self):\n s = \"--\\n\"\n for element in self:\n s += element.__str__() + \"\\n\"\n s += \"--\"\n \"\"\"\n # Uncomment if you want to see the internal structure\n s = \"\\n--\\n\"\n for i in xrange(self.size):\n s += \"%d [%s, %s]\\n\" % ( i, self.slot[i], self.data[i] )\n s += \"--\"\n \"\"\"\n return s", "def __str__(self):\n return self.printable()", "def __str__(self):\n\n outstr = 'gear wheel data:\\n'\n # output gear data\n for date in self.data:\n outstr += date.ljust(10) + ':\\t' + str(self.data.get(date)) + '\\n'\n\n # output modification data\n if self.modifications:\n outstr += '\\nflank modifications:\\n'\n for date in self.modifications:\n outstr += date.ljust(10) + ':\\t' + str(self.modifications.get(date)) + '\\n'\n\n # output tooth form coordinates\n if self.formcoords:\n # upper and lower index of point-array\n outstr += '\\ntooth form coordinates:\\n'\n for coord in self.formcoords:\n outstr += str(coord[0]) + '\\t' + str(coord[1]) + '\\n'\n\n return outstr", "def __repr__(self):\n s = self.print_bfs()\n return s", "def __repr__(self):\n str(self)", "def __repr__(self):\n return f\"{self.number} {self.name}: {self.desc}\"", "def __str__(self):\r\n\r\n retval = self.__class__.__name__ + ' ('\r\n for val in self.VALUES:\r\n value = getattr(self, val, None)\r\n if value is not None:\r\n retval += '%s:%.4f ' % (val, getattr(self, val))\r\n return retval.strip() + ')'", "def __repr__(self):\r\n\t\treturn str(self)", "def __repr__(self):\n return ''.join(f'\\ncompany: {self.company_name}\\nsize: {self.company_size}\\ncompany_founded: '\n f'{self.company_founded}\\ncompany_industry: {self.company_industry}\\ncompany_sector: '\n f'{self.company_sector}\\ncompany_type: {self.company_type}\\ncompany_rating: '\n f'{self.company_rating}\\ncompany_competitors: {self.company_competitors}\\ncompany_revenue: '\n f'{self.company_revenue}\\ncompany_headquarters: {self.company_headquarters}')", "def __repr__ (self):\n\t\tStr = \"\"\n\t\tfor i in self.structref:\n\t\t\tStr = Str + \"%-15s = \"%(i[self.NAME])\n\t\t\tvalue = self.value [i[self.NAME]]\n\t\t\tif isInteger(value):\n\t\t\t\tStr = Str + \"%d, 0x%X\"%(value,value)\n\t\t\t\tif value >= 0x20 and value <= 0xFF:\n\t\t\t\t\tStr = Str + \" '\" + chr (value) + \"'\"\n\t\t\telse:\n\t\t\t\tif type(value) == type(bytes(0)):\n\t\t\t\t\tStr = Str + 
value.decode(\"utf8\",\"ignore\")\n\t\t\t\telse:\n\t\t\t\t\tStr = Str + str(value) \n\t\t\t\t\t\n\t\t\tStr = Str + \"\\n\"\n\t\treturn Str", "def __repr__(self):\r\n return self.to_str()", "def __repr__(self):\r\n return self.to_str()", "def __repr__(self):\n\t\treturn repr( (self.name, self.position, self.cost, self.vorp) )", "def __str__(self):\n return repr(self)", "def __repr__(self):\r\n return str(self)", "def __repr__(self):\n return self.__dado", "def __repr__(self):\n # make a string out of my data\n return \"My name is {0}, my value {1}\".format(self.my_name, self.my_local_float)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __unicode__(self):\n prepr = com.pprint_thing(self, escape_chars=('\\t', '\\r', '\\n'),\n quote_strings=True)\n return \"%s(%s, dtype='%s')\" % (type(self).__name__, prepr, self.dtype)", "def __str__(self):\r\n return repr(self)", "def __repr__(self):\n\t\treturn str(self)", "def __repr__(self):\n\t\treturn str(self)", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def 
__repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()" ]
[ "0.71572053", "0.70599174", "0.70522857", "0.69488704", "0.69239604", "0.6836956", "0.6819858", "0.6812137", "0.6802802", "0.6789431", "0.6776764", "0.6766797", "0.6753627", "0.6751448", "0.6745186", "0.67189395", "0.6699513", "0.6692117", "0.6681709", "0.6676318", "0.6663971", "0.6663641", "0.6663305", "0.6659223", "0.66523874", "0.6641484", "0.6641484", "0.6636226", "0.6630794", "0.66239804", "0.66203946", "0.66191924", "0.66149604", "0.66149604", "0.66149604", "0.66149604", "0.66149604", "0.66149604", "0.66149604", "0.66149604", "0.66149604", "0.66149604", "0.66149604", "0.66149604", "0.66149604", "0.66119593", "0.6608306", "0.66072005", "0.66072005", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275", "0.66051275" ]
0.74030674
0
Read the VQE convergence data for the mini BMN model from disk
def read_data( optimizer: str, p: dict, ): filename = f"{p['f']}_l{p['l']}_convergence_{optimizer}_{p['v']}_depth{p['d']}_reps{p['n']}_max{p['m']}.{p['s']}" if not os.path.isfile(filename): print(f"{filename} does not exist.") sys.exit() if p['s'] == 'h5': df = pd.read_hdf(filename, "vqe") if p['s'] == 'gz': df = pd.read_pickle(filename) return df[df.counts<=p['m']]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_models():\n model_files_cvd = np.sort(glob.glob(\"./grad_results/cvd*N1024_f0003.npy\"))\n model_files_mnist = np.sort(glob.glob(\"./grad_results/mnist*N25000_f02.npy\"))\n\n model_files_cvd = np.array([model_files_cvd[2], model_files_cvd[1], model_files_cvd[0]])\n\n results_cvd = []\n results_mnist = []\n\n for filename in model_files_cvd:\n results_cvd.append(np.load(filename))\n \n for filename in model_files_mnist:\n results_mnist.append(np.load(filename))\n\n return np.array(results_mnist), np.array(results_cvd)", "def model_data():\n x_train, y_train, x_val, y_val, x_test, y_test = read_data(\"src/tests/dataclassificationmodel/ferPlus_processed.pbz2\", False)\n return x_train, y_train, x_val, y_val, x_test, y_test", "def readin():\r\n nodes = np.loadtxt('Vnodes.txt', ndmin=2)\r\n mats = np.loadtxt('Vmater.txt', ndmin=2)\r\n elements = np.loadtxt('Veles.txt', ndmin=2)\r\n loads = np.loadtxt('Vloads.txt', ndmin=2)\r\n return nodes, mats, elements, loads", "def read_dataset_v1():\n path = load_config()\n T = feather.read_dataframe(path['data_dir'] / 'T_dat.feather')\n E = feather.read_dataframe(path['data_dir'] / 'E_dat.feather')\n M = feather.read_dataframe(path['data_dir'] / 'Meta.feather')\n data = sio.loadmat(path['data_dir'] / 'highvar_genes.mat', squeeze_me=True)\n return T, E, M, data", "def read_dataset_v2():\n path = load_config()\n T = feather.read_dataframe(path['data_dir'] / 'T_dat_v2.feather')\n E = feather.read_dataframe(path['data_dir'] / 'E_dat_v2.feather')\n M = feather.read_dataframe(path['data_dir'] / 'Meta_v2.feather')\n data = sio.loadmat(path['data_dir'] / 'highvar_genes_v2.mat', squeeze_me=True)\n return T, E, M, data", "def load_vae_full(path, nb_of_bands, folder=False): \n latent_dim = 32\n \n # Build the encoder and decoder\n encoder, decoder = model.vae_model(latent_dim, nb_of_bands)\n\n # Build the model\n vae_loaded, vae_utils, Dkl = vae_functions.build_vanilla_vae(encoder, decoder, full_cov=False, coeff_KL = 0)\n\n if folder == False: \n vae_loaded.load_weights(path)\n else:\n print(path)\n latest = tf.train.latest_checkpoint(path)\n vae_loaded.load_weights(latest)\n\n return vae_loaded, vae_utils, encoder, decoder, Dkl", "def main(model_path='models/Nakakuki_Cell_2010_ODE'):\n n_file = []\n fitparam_files = os.listdir(model_path.strip('/') + '/fitparam')\n for file in fitparam_files:\n if re.match(r'\\d', file):\n n_file.append(int(file))\n for nth_paramset in n_file:\n os.makedirs(\n model_path.strip('/') \n + '/dat2npy/out/{:d}'.format(nth_paramset), exist_ok=True\n )\n nth_fitparam_files = os.listdir(\n model_path.strip('/') + '/fitparam/{:d}'.format(nth_paramset)\n )\n for dat_file in nth_fitparam_files:\n if 'fit' in dat_file:\n \"\"\"\n - fit_param%d.dat -> fit_param%d.npy\n - best_fitness.dat -> best_fitness.npy\n \"\"\"\n try:\n data = np.loadtxt(\n model_path.strip('/') + '/fitparam/{:d}/{}'.format(\n nth_paramset, dat_file\n ), dtype='float'\n )\n except ValueError:\n pass\n else:\n \"\"\"\n - count_num.dat -> count_num.npy\n - generation.dat -> generation.npy\n \"\"\"\n data = np.loadtxt(\n model_path.strip('/') + '/fitparam/{:d}/{}'.format(\n nth_paramset, dat_file\n ), dtype='int'\n )\n np.save(\n model_path.strip('/') + '/dat2npy/out/{:d}/'.format(nth_paramset)\n + dat_file.replace('.dat', '.npy'), data\n )\n if os.path.isfile(\n './logs/{:d}.log'.format(nth_paramset)):\n shutil.copyfile(\n './logs/{:d}.log'.format(nth_paramset),\n model_path.strip('/') \n + '/dat2npy/out/{:d}/optimization.log'.format(nth_paramset)\n )", "def 
load_data():\n\n dump_path = dump_base + '/micro_poi/mpoi_info/'\n\n assert os.path.exists(dump_path)\n\n dpath = dump_path + 'shortest_path.pickle'\n paths = joblib.load(dpath)\n\n dpath = dump_path + 'path_list.pickle'\n path_list = joblib.load(dpath)\n\n dpath = dump_path + 'gain.pickle'\n gain = joblib.load(dpath)\n\n dpath = dump_path + 'stay.pickle'\n stay_time = joblib.load(dpath)\n\n dpath = dump_path + 'reach.pickle'\n reach_time = joblib.load(dpath)\n\n spath = dump_base + '/micro_poi/model_params.list'\n model_params = np.loadtxt(spath)\n\n return np.array(paths), path_list, gain, stay_time, reach_time, model_params", "def load_back_from_disk(data_dir, istrain=True):\n \"\"\"load back metadata_df\"\"\"\n meta_data = pickle.load(open(os.path.join(data_dir, 'meta.pkl'), 'rb'))\n metadata_rows = meta_data[0]\n max_node = meta_data[1]\n\n \"\"\"itershard by loading from disk\"\"\"\n all_X, all_y, all_size, all_L, all_names, all_node_img = [], [], [], [], [], []\n\n for _, row in enumerate(metadata_rows):\n X = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['X'])))\n L = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['L'])))\n y = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['y'])))\n size = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['size'])))\n names = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['name'])))\n node_img = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['node_img'])))\n\n \"\"\" stack to list\"\"\"\n all_X.append(X)\n all_y.append(y)\n all_L.append(L)\n all_size.append(size)\n all_names.append(names)\n all_node_img.append(node_img)\n\n \"\"\" return a Dataset contains all X, y, w, ids\"\"\"\n all_X = np.squeeze(np.vstack(all_X))\n all_L = np.squeeze(np.vstack(all_L))\n all_y = np.squeeze(np.concatenate(all_y))\n all_size = np.squeeze(np.concatenate(all_size))\n all_names = np.squeeze(np.concatenate(all_names))\n all_node_img = np.squeeze(np.concatenate(all_node_img))\n\n # create output dataset\n dataset = dict()\n if istrain:\n dataset['X'] = all_X[:TRAIN_NUM]\n dataset['y'] = all_y[:TRAIN_NUM]\n dataset['size'] = all_size[:TRAIN_NUM]\n dataset['L'] = all_L[:TRAIN_NUM]\n dataset['name'] = all_names[:TRAIN_NUM]\n dataset['node_img'] = all_node_img[:TRAIN_NUM]\n else:\n dataset['X'] = all_X[:TEST_NUM]\n dataset['y'] = all_y[:TEST_NUM]\n dataset['size'] = all_size[:TEST_NUM]\n dataset['L'] = all_L[:TEST_NUM]\n dataset['name'] = all_names[:TEST_NUM]\n dataset['node_img'] = all_node_img[:TEST_NUM]\n\n return dataset, max_node", "def load_vae_conv(path,nb_of_bands,folder = False): \n latent_dim = 32\n \n # Build the encoder and decoder\n encoder, decoder = model.vae_model(latent_dim, nb_of_bands)\n\n # Build the model\n vae_loaded, vae_utils, Dkl = vae_functions.build_vanilla_vae(encoder, decoder, full_cov=False, coeff_KL = 0)\n\n if folder == False: \n vae_loaded.load_weights(path)\n else:\n latest = tf.train.latest_checkpoint(path)\n vae_loaded.load_weights(latest)\n\n return vae_loaded, vae_utils, encoder, Dkl", "def read_vmdas(self,):\n fd = self.f\n # The raw files produced by VMDAS contain a binary navigation data\n # block.\n self.cfg['sourceprog'] = 'VMDAS'\n ens = self.ensemble\n k = ens.k\n if self._source != 1 and self._debug_level >= 1:\n print(' \\n***** Apparently a VMDAS file \\n\\n')\n self._source = 1\n self.vars_read += ['time_gps',\n 'latitude_gps',\n 'longitude_gps',\n 'etime_gps',\n 'elatitude_gps',\n 'elongitude_gps',\n 'flags',\n 'ntime', ]\n utim = fd.read_ui8(4)\n 
date = tmlib.datetime(utim[2] + utim[3] * 256, utim[1], utim[0])\n # This byte is in hundredths of seconds (10s of milliseconds):\n time = tmlib.timedelta(milliseconds=(int(fd.read_ui32(1) / 10)))\n fd.seek(4, 1) # \"PC clock offset from UTC\" - clock drift in ms?\n ens.time_gps[k] = tmlib.date2epoch(date + time)[0]\n ens.latitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.longitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.etime_gps[k] = tmlib.date2epoch(date + tmlib.timedelta(\n milliseconds=int(fd.read_ui32(1) * 10)))[0]\n ens.elatitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.elongitude_gps[k] = fd.read_i32(1) * self._cfac\n fd.seek(12, 1)\n ens.flags[k] = fd.read_ui16(1)\n fd.seek(6, 1)\n utim = fd.read_ui8(4)\n date = tmlib.datetime(utim[0] + utim[1] * 256, utim[3], utim[2])\n ens.ntime[k] = tmlib.date2epoch(date + tmlib.timedelta(\n milliseconds=int(fd.read_ui32(1) / 10)))[0]\n fd.seek(16, 1)\n self._nbyte = 2 + 76", "def read_model(node_file, mater_file, els_file, load_file, verbose=True):\n # rea\n nodes = np.loadtxt(node_file)\n mats = np.loadtxt(mater_file)\n elements = np.loadtxt(els_file, dtype=int)\n loads = np.loadtxt(load_file)\n \n # Generate echo files\n if verbose:\n np.savetxt(\"KNODES.txt\", nodes, fmt='%5.2f', delimiter=' ')\n np.savetxt(\"KMATES.txt\", mats, fmt='%5.2f', delimiter=' ')\n np.savetxt(\"KELEMS.txt\", elements, fmt='%5.2f', delimiter=' ')\n np.savetxt(\"KLOADS.txt\", loads, fmt='%5.2f', delimiter=' ')\n \n return nodes, mats, elements, loads", "def read_glm_epochs(infile):\n with open(infile, 'rb') as outp:\n glmepec = pickle.load(outp)\n return glmepec", "def parse_BS_data(retrieved_folder, fermi_level, kpoints):\n # conversion factor from Ry to eV\n eVscale = get_Ry2eV()\n\n retrieved_list = retrieved_folder.list_object_names()\n qdos_file_list = [i for i in retrieved_list if 'qdos.' in i]\n q_vec_file = 'qvec.dat'\n\n if q_vec_file in retrieved_list:\n with retrieved_folder.open(q_vec_file) as file_opened:\n q_vec = np.loadtxt(file_opened, skiprows=1)\n\n for icount, fname in enumerate(qdos_file_list):\n with retrieved_folder.open(fname) as _f:\n loaded_file = np.loadtxt(_f)\n if icount == 0:\n total_qdos = loaded_file\n else:\n total_qdos[:, 5:] += loaded_file[:, 5:]\n\n ef = fermi_level.value # in Ry unit\n total_qdos[:, 0] = (total_qdos[:, 0] - ef) * eVscale\n eng_points = set(total_qdos[:, 0])\n eng_points = np.sort(list(eng_points))\n no_eng_points = len(eng_points)\n\n qdos_intensity = np.ndarray(shape=(no_eng_points, len(q_vec)))\n for ne in range(np.shape(qdos_intensity)[0]):\n nk = np.shape(qdos_intensity)[1]\n # sum up all l-channels (5 is only the s-channel!)\n qdos_intensity[ne, :] = np.sum(total_qdos[ne * nk:(ne + 1) * nk, 5:], axis=1) / eVscale\n\n qdos_intensity = qdos_intensity.T # setting eng-kpts corresponds to x-y asix\n q_vec = np.asarray(q_vec) # converting q_vec into array\n eng_points = (np.asarray(eng_points)) # converting eng_popints into array in Ry unit\n\n # To save into the ArrayData\n array = ArrayData()\n array.set_array('BlochSpectralFunction', qdos_intensity)\n array.set_array('Kpts', q_vec)\n array.set_array('energy_points', eng_points)\n if kpoints.labels is not None:\n klbl_dict = dict(kpoints.labels) # Special k-points\n array.extras['k-labels'] = klbl_dict\n\n return {'BS_Data': array}", "def load_data_from_disk(self):\n data = dict()\n Omega_M = self.theta_fid[0]\n der_den = 1. / (2. * self.delta_theta)\n\n print (\"Loading data from disk.. 
Omega_M = \", Omega_M, \"delta_theta = \", self.delta_theta[0])\n\n for key in ['x_central', 'x_m', 'x_p', 'x_central_test', 'x_m_test', 'x_p_test']:\n data[key] = np.load(f'./preloaded_data/{Omega_M}_{self.delta_theta[0]}_{key}.npy')\n\n return data, der_den", "def test_run_read(self):\n\n self.ictrl[0] = 1 + 2 + 4 + 8\n vmec_f90wrap.runvmec(self.ictrl, self.filename, self.verbose, \\\n self.fcomm, reset_file)\n\n self.assertTrue(self.ictrl[1] in success_codes)\n\n self.assertEqual(vmec_f90wrap.vmec_input.nfp, 3)\n self.assertEqual(vmec_f90wrap.vmec_input.mpol, 4)\n self.assertEqual(vmec_f90wrap.vmec_input.ntor, 3)\n print('rbc.shape:', vmec_f90wrap.vmec_input.rbc.shape)\n print('rbc:',vmec_f90wrap.vmec_input.rbc[101:103, 0:4])\n\n # n = 0, m = 0:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.rbc[101,0], 1.3782)\n\n # n = 0, m = 1:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.zbs[101,1], 4.6465E-01)\n\n # n = 1, m = 1:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.zbs[102,1], 1.6516E-01)\n\n # Now try reading in the output\n wout_file = os.path.join(os.path.dirname(__file__), 'wout_li383_low_res.nc')\n ierr = 0\n vmec_f90wrap.read_wout_mod.read_wout_file(wout_file, ierr)\n self.assertEqual(ierr, 0)\n self.assertAlmostEqual(vmec_f90wrap.read_wout_mod.betatot, \\\n 0.0426215030653306, places=4)\n\n print('iotaf.shape:',vmec_f90wrap.read_wout_mod.iotaf.shape)\n print('rmnc.shape:',vmec_f90wrap.read_wout_mod.rmnc.shape)\n\n self.assertAlmostEqual(vmec_f90wrap.read_wout_mod.iotaf[-1], \\\n 0.654868168783638, places=4)\n\n self.assertAlmostEqual(vmec_f90wrap.read_wout_mod.rmnc[0, 0], \\\n 1.4773028173065, places=4)", "def read_model(filename):\n return joblib.load(filename)", "def load_data(self):\n x_vector = pickle.load(open(self.file_stem + \"x.pickle\", \"rb\"))\n ode_sols = pickle.load(open(self.file_stem + \"sols.pickle\", \"rb\"))\n forcings = pickle.load(open(self.file_stem + \"fs.pickle\", \"rb\"))\n sl_coeffs = pickle.load(open(self.file_stem + \"coeffs.pickle\", \"rb\"))\n\n return x_vector, ode_sols, forcings, sl_coeffs", "def load_model_file(device_index):\n print(\"\\nStart loading model...\")\n\n return kdp_wrapper.isi_load_nef(device_index, MODEL_FILE, ISI_APP_ID)", "def test_readfile(self):\n fname = os.path.join(self.datadir, 'monol_testA_E3-50_rebin4_gti') + \\\n HEN_FILE_EXTENSION\n command = \"{0}\".format(fname)\n\n hen.io.main(command.split())", "def load_NMF_model():\n model = pickle.load(open(\"models/nmf_model.sav\", 'rb'))\n Q = model.components_ \n return model, Q", "def read_qmcpack_dense(filename):\n with h5py.File(filename, 'r') as fh5:\n enuc = fh5['Hamiltonian/Energies'][:][0]\n dims = fh5['Hamiltonian/dims'][:]\n hcore = fh5['Hamiltonian/hcore'][:]\n chol = fh5['Hamiltonian/DenseFactorized/L'][:]\n\n return hcore, chol, enuc", "def load_data():\n\n training_files_dir = \"digits/trainingDigits\"\n training_files = os.listdir(training_files_dir)\n file_num = len(training_files)\n hw_labels = []\n\n training_mat = zeros((file_num, 32 * 32))\n for i in xrange(file_num):\n filename = training_files[i]\n file_label = int((filename.split(\".\")[0]).split(\"_\")[0])\n hw_labels.append(file_label)\n training_mat[i, :] = img2vector(training_files_dir + '/' + filename)\n\n return training_mat, hw_labels", "def _read(self):\n # initializng data dictionary\n self.data={}\n\n f = FortranFile(self.filename)\n # Default omnivor binary header\n self.data['MK'] = f.readInts('i')\n self.data['itime'] = f.readInts('i')\n self.data['version'] = f.readString()\n 
self.data['file_id'] = f.readInts('i')\n self.data['sversion'] = f.readString()\n # Velocity field\n self.data['stype'] = f.readString()\n self.data['is_grid'] = f.readInts('i')\n nCPs = f.readInts('i')\n self.data['nCPs'] = nCPs\n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n #print('File is a velocity grid file')\n n1 = f.readInts('i')\n n2 = f.readInts('i')\n n3 = f.readInts('i')\n self.data['n1'] = n1\n self.data['n2'] = n2\n self.data['n3'] = n3\n self.data['is_straight'] = f.readInts('i')\n self.data['v1'] = f.readReals(real_char)\n self.data['v2'] = f.readReals(real_char)\n self.data['v3'] = f.readReals(real_char)\n\n CPs_raw = f.readReals(real_char)\n Utot_raw = f.readReals(real_char)\n CPs = np.reshape(CPs_raw,(3,nCPs),order = 'F')\n Utot = np.reshape(Utot_raw,(3,nCPs),order = 'F')\n\n acc=-1\n CPsTab = np.zeros((3, n1,n2,n3))\n UtotTab = np.zeros((3, n1,n2,n3))\n # Reshaping the nasty way (this is natural order). \n for i in range(0,n1):\n for j in range(0,n2):\n for k in range(0,n3):\n acc=acc+1\n CPsTab[0:3,i,j,k] = CPs[0:3,acc]\n UtotTab[0:3,i,j,k] = Utot[0:3,acc]\n\n self.data['CPs'] = CPs\n self.data['CPsTab'] = CPsTab\n self.data['Utot'] = Utot\n self.data['UtotTab'] = UtotTab", "def _read_csvs(self):\n self.data = pd.read_csv(self.path+self.name, index_col=0)", "def load_epsvec( fname ):\n try:\n E = numpy.loadtxt( fname )\n except IOError:\n prefix = '/data/jberwald/neurons/epsilons/'\n E = numpy.loadtxt( prefix + fname )\n return E", "def test_read_input(self):\n self.ictrl[0] = run_modes['input']\n vmec_f90wrap.runvmec(self.ictrl, self.filename, self.verbose, \\\n self.fcomm, reset_file)\n\n self.assertTrue(self.ictrl[1] in success_codes)\n\n self.assertEqual(vmec_f90wrap.vmec_input.nfp, 3)\n self.assertEqual(vmec_f90wrap.vmec_input.mpol, 4)\n self.assertEqual(vmec_f90wrap.vmec_input.ntor, 3)\n print('rbc.shape:', vmec_f90wrap.vmec_input.rbc.shape)\n print('rbc:',vmec_f90wrap.vmec_input.rbc[101:103, 0:4])\n\n # n = 0, m = 0:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.rbc[101,0], 1.3782)\n\n # n = 0, m = 1:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.zbs[101,1], 4.6465E-01)\n\n # n = 1, m = 1:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.zbs[102,1], 1.6516E-01)", "def get_examples(data_dir, mode, task_id, shard_id):\n file_path = get_full_filename(data_dir, mode, task_id, shard_id)\n relative_path = \"/\".join(file_path.split(\"/\")[3:])\n tf.logging.info(\"Reading file: %s\" % (file_path))\n print(relative_path)\n #client = storage.Client(projectname, credentials=credentials)\n #bucket = client.get_bucket(bucket_name)\n blob = storage_bucket.blob(relative_path)\n if not blob.exists():\n tf.logging.info(\"Path doesn't exist\")\n return None\n nq_data = extract_nq_data(file_path)\n tf.logging.info(\"NQ data Size: \" + str(len(nq_data.keys())))\n\n tf.logging.info(\"Performing entity extraction\")\n fact_extracted_data = entity_link_nq(nq_data)\n return fact_extracted_data", "def read_data():\n ADV_MAT = np.load('ADV.npy');\n ADJ_MAT = np.load('ADJ.npy');\n PR_MAT = np.load('PR.npy'); \n NN_MAT = np.load('NN.npy');\n for i in range(ADV_MAT.shape[0]):RUNNING_DATA['ADV___'+str(i)] = ADV_MAT[i];\n for i in range(ADJ_MAT.shape[0]):RUNNING_DATA['ADJ___'+str(i)] = ADJ_MAT[i];\n for i in range(PR_MAT.shape[0]):RUNNING_DATA['PR___'+str(i)] = PR_MAT[i];\n for i in range(NN_MAT.shape[0]):RUNNING_DATA['NN___'+str(i)] = NN_MAT[i];", "def load_model(file_index):\n normal, abnormal, all = read_in(file_index, 1, 2, 0.3)\n 
autoencoder = keras.models.load_model('Working_Data/ae_patient_' + str(file_index) + '_dim' + str(100) + '_model.h5')\n reconstructed = autoencoder.predict(all)\n reconstruction_save = \"Working_Data/reconstructed_cdae_10d_Idx\" + str(file_index) + \".npy\"\n np.save(reconstruction_save, reconstructed)", "def read_mesa(self, filename=None):\n\n if filename is None:\n filename = self.model_file\n\n with open(filename, 'r') as f:\n # count number of lines\n npts_file = sum([1 for line in f])\n\n # go back to start and read first line in file to get number of parameters\n f.seek(0)\n l = f.readline()\n nparams_file = int(l.split(' ')[-1])\n\n # skip lines 2-4\n for i in range(3):\n f.readline()\n\n # the fifth line will give us the number of variables\n l = f.readline()\n nvars_file = int(l.split(' ')[-1])\n\n # subtract header rows\n npts_file -= 6\n\n print(f'{nvars_file} variables found in the initial model file')\n print(f'{npts_file} points found in the initial model file')\n\n var_idx_map = {}\n logR_idx = -1\n\n # read in the names of the variables\n for i in range(nvars_file):\n var_name_file = f.readline().strip()\n if var_name_file.lower() == 'n':\n var_name_file = 'neut'\n elif var_name_file == 'p':\n var_name_file = 'prot'\n\n if var_name_file == 'logR':\n logR_idx = i\n continue\n\n # create map of file indices to model indices\n try:\n var_idx_map[self.idx[var_name_file]] = i\n except KeyError:\n var_idx_map[self.idx['spec'] - 1 + network_module.network_species_index(var_name_file.lower())] = i\n\n base_r = np.zeros(npts_file)\n base_state = np.zeros((npts_file, self.nvar))\n\n # read in model data\n for i, line in enumerate(f):\n variables = [float(v) for v in line.split(' ')]\n\n # need to reverse the inputs file here\n\n n = npts_file - i - 1\n\n base_r[n] = R_solar * 10**variables[logR_idx]\n\n for j in range(self.nvar):\n if j in var_idx_map:\n base_state[n, j] = variables[var_idx_map[j]]\n\n return npts_file, base_r, base_state", "def _fetch_large():\n # Large training data:\n resource(\n target=data_path(\"eeg\", \"SMNI_CMI_TRAIN.tar.gz\"),\n url=\"https://kdd.ics.uci.edu/databases/eeg/SMNI_CMI_TRAIN.tar.gz\",\n )\n dependency(\n target=data_path(\"eeg\", \"train\"),\n source=data_path(\"eeg\", \"SMNI_CMI_TRAIN.tar.gz\"),\n commands=[\n \"tar xzf SMNI_CMI_TRAIN.tar.gz\",\n \"mv SMNI_CMI_TRAIN train\",\n \"find train | grep gz$ | xargs gunzip\",\n ],\n )\n # Large test data:\n resource(\n target=data_path(\"eeg\", \"SMNI_CMI_TEST.tar.gz\"),\n url=\"https://kdd.ics.uci.edu/databases/eeg/SMNI_CMI_TEST.tar.gz\",\n )\n dependency(\n target=data_path(\"eeg\", \"test\"),\n source=data_path(\"eeg\", \"SMNI_CMI_TEST.tar.gz\"),\n commands=[\n \"tar xzf SMNI_CMI_TEST.tar.gz\",\n \"mv SMNI_CMI_TEST test\",\n \"find test | grep gz$ | xargs gunzip\",\n ],\n )", "def read_data(self):\n data = np.genfromtxt(self.__file) # Planck SED\n self.__nu = 10.0**data[:,0]\n self.__nuF = 10.0**data[:,2]\n self.__err = 10.0**data[:,3]\n #self.__W = 10.0**data[:,4]\n self.__yerr = [ self.__nuF - self.__nuF / self.__err, \\\n self.__nuF * self.__err - self.__nuF ]\n self.__maxY = max( self.__nuF )\n self.__minY = min( self.__nuF )", "def read_data(self):\n self.data = reduce_spectrum(self.filename)", "def load_vecs():\n global VECTORIZER\n global CECTORIZER\n\n v_file = os.path.join(TMP_DIR, 'vectorizer.pickle')\n d_file = os.path.join(TMP_DIR, 'dectorizer.pickle')\n f_file = os.path.join(TMP_DIR, 'freq.pickle')\n\n if os.path.isfile(v_file) and os.path.isfile(d_file):\n with open(v_file, 'rb') as 
f:\n VECTORIZER = pickle.load(f)\n with open(d_file, 'rb') as f:\n CECTORIZER = pickle.load(f)\n return True\n\n return False", "def read_data(data_path):\n tr = data_path + 'train_vectors.txt'\n v = data_path + 'val_vectors.txt'\n tst = data_path + 'test_vectors.txt'\n return tr, v, tst", "def loader(filename,wdm=0,verbose=0,kmpers=1):\n with open(filename, 'rb') as f:\n if wdm == False:\n if verbose>1:\n print(filename)\n #file info\n info= np.fromfile(f,dtype=infodtype,count=1)\n infoBytes = f.tell()\n if verbose>2:\n print(infoBytes)\n #skip darkmatter\n #read the first dm line\n if verbose>2:\n print(f.tell())\n catd = np.fromfile(f,dtype= dmdtype, count=1) \n #get the bytes location and subtract off the bytes location after loading info to get n bytes a line for dm\n if verbose>2:\n print(f.tell())\n current = f.tell()\n dmBytes = current-infoBytes\n f.seek(dmBytes*(info['nd'][0]-1)+current)\n if verbose>2:\n print(f.tell())\n # stars setup \n cats= np.fromfile(f,dtype=stellardtype, count=info['ns'][0])\n if verbose>2:\n print('done')\n else:\n if verbose>1:\n print(filename)\n #file info\n info= np.fromfile(f,dtype=infodtype,count=1)\n if verbose>2:\n print(f.tell())\n # #dark matter setup count is reading the number of ?rows? \n catd= np.fromfile(f,dmdtype, count=info['nd'][0]) \n if verbose>2:\n print(f.tell()) \n # stars setup \n cats= np.fromfile(f,dtype=stellardtype, count=info['ns'][0])\n if verbose>2:\n print('done')\n \n \n #convert to physical units as found in README.md\n if wdm == True:\n catd['mass']*=2.324876e9\n if kmpers == 1:\n catd['vx']*=100.\n catd['vy']*=100.\n catd['vz']*=100.\n cats['mass']*=2.324876e9\n if kmpers == 1:\n cats['vx']*=100.\n cats['vy']*=100.\n cats['vz']*=100.\n \n if wdm == True:\n return(catd,cats,info)\n else:\n return(cats,info)", "def get_data():\r\n if not path_validation(MODEL_PATH, read_access=True):\r\n exit(0) \r\n if not path_validation(TEST_DATA_PATH, read_access=True):\r\n exit(0) \r\n if not path_validation(TEST_LABEL_PATH, read_access=True):\r\n exit(0) \r\n\r\n params = joblib.load(MODEL_PATH)\r\n test_images = np.load(TEST_DATA_PATH)\r\n test_labels = np.load(TEST_LABEL_PATH)\r\n\r\n # Addition of bias in test set\r\n test_images = np.insert(test_images, 0, 1, axis=1)\r\n\r\n return params, test_images, test_labels", "def read_wikibrain_vecs(path):\n matrix = []\n with open(path, \"r\") as vecs:\n vecs.readline()\n for line in vecs:\n matrix.append(map(float, line.rstrip(\"\\n\").split(\"\\t\")))\n return matrix", "def get_memes_data(path):\n data = load_files(path)\n memes = np.array(data['filenames'])\n return memes", "def readmodel(model = 'dominguez'):\n ebl_file_path = os.path.join(os.path.split(__file__)[0],'data/')\n\n if model == 'kneiske':\n file_name = join(ebl_file_path , 'ebl_nuFnu_tanja.dat')\n elif model == 'franceschini':\n file_name = join(ebl_file_path , 'ebl_franceschini.dat')\n elif model == 'dominguez':\n file_name = join(ebl_file_path , 'ebl_dominguez11.out')\n elif model == 'dominguez-upper':\n file_name = join(ebl_file_path , 'ebl_upper_uncertainties_dominguez11.out')\n elif model == 'dominguez-lower':\n file_name = join(ebl_file_path , 'ebl_lower_uncertainties_dominguez11.out')\n elif model == 'inoue':\n file_name = join(ebl_file_path , 'EBL_z_0_baseline.dat')\n #file_name = join(ebl_file_path , 'EBL_proper_baseline.dat')\n elif model == 'inoue-low-pop3':\n file_name = join(ebl_file_path , 'EBL_z_0_low_pop3.dat')\n #file_name = join(ebl_file_path , 'EBL_proper_low_pop3.dat')\n elif model == 
'inoue-up-pop3':\n file_name = join(ebl_file_path , 'EBL_z_0_up_pop3.dat')\n #file_name = join(ebl_file_path , 'EBL_proper_up_pop3.dat')\n elif model == 'gilmore':\n file_name = join(ebl_file_path , 'eblflux_fiducial.dat')\n elif model == 'gilmore-fixed':\n file_name = join(ebl_file_path , 'eblflux_fixed.dat')\n elif model == 'cuba':\n file_name = join(ebl_file_path , 'CUBA_UVB.dat')\n elif model == 'finke':\n file_name = join(ebl_file_path , 'ebl_modelC_Finke.txt')\n else:\n raise ValueError(\"Unknown EBL model chosen!\")\n\n data = np.loadtxt(file_name)\n if model.find('inoue') >= 0:\n z = np.array([0.])\n #z = data[0,1:]\n #nuInu = data[:,1]\n lmu = data[:,0]\n nuInu = np.array([data[:,1]]).T\n raise ValueError('Inoue models not correctly implemented at the moment, choose another model')\n\n elif model.find('gilmore') >= 0:\n z = data[0,1:]\n lmu = data[1:,0] * 1e-4 # convert from Angstrom to micro meter\n nuInu = data[1:,1:] \n nuInu[nuInu == 0.] = 1e-20 * np.ones(np.sum(nuInu == 0.))\n \n # convert from ergs/s/cm^2/Ang/sr to nW/m^2/sr\n nuInu = (nuInu.T * data[1:,0]).T * 1e4 * 1e-7 * 1e9 \n\n elif model == 'cuba':\n z = data[0,1:-1]\n lmu = data[1:,0] * 1e-4\n nuInu = data[1:,1:-1]\n\n # replace zeros by 1e-40\n idx = np.where(data[1:,1:-1] == 0.)\n nuInu[idx] = np.ones(np.sum(nuInu == 0.)) * 1e-20\n\n # in erg / cm^2 / s / sr\n nuInu = (nuInu.T * c.c.value / (lmu * 1e-6)).T \n nuInu *= 1e6 # in nW / m^2 / sr\n\n # check where lmu is not strictly increasing\n idx = np.where(np.diff(lmu) == 0.)\n for i in idx[0]:\n lmu[i+1] = (lmu[i + 2] + lmu[i]) / 2.\n\n else:\n z = data[0,1:]\n lmu = data[1:,0]\n nuInu = data[1:,1:]\n if model == 'finke': \n lmu = lmu[::-1] * 1e-4\n nuInu = nuInu[::-1]\n\n return EBL(z,lmu,nuInu, model = model)", "def load_data_from_fold(data_path):\r\n print(\"\\nLoading data from json folder {}\".format(data_path))\r\n\r\n SAMPLES_TO_CONSIDER = 22050\r\n\r\n data = preprocess_dataset(data_path, SAMPLES_TO_CONSIDER)\r\n\r\n X = np.array(data[\"MFCCs\"])\r\n y = np.array(data[\"labels\"])\r\n print(\"Training sets loaded!\")\r\n print(\"data size :\", X.shape, \"labels size: \", y.shape)\r\n print(\"release the 'data' for memories\")\r\n del data\r\n\r\n return X, y", "def read_files(data_loc, vector_size, window, epochs, min_df, max_df):\n\n class Data: pass\n\n speech = Data()\n\n print(\"-- train data\")\n speech.train_data, speech.train_fnames, speech.train_labels = read_tsv(data_loc, \"train.tsv\")\n print(len(speech.train_data))\n\n print(\"-- dev data\")\n speech.dev_data, speech.dev_fnames, speech.dev_labels = read_tsv(data_loc, \"dev.tsv\")\n print(len(speech.dev_data))\n\n print(\"-- test data\")\n test_data, test_fnames = read_unlabeled(data_loc, 'test')\n\n # print(\"-- unlabeled data\")\n # unlabeled_data, unlabeled_fnames = read_unlabeled(data_loc, 'unlabeled')\n # print(len(unlabeled_fnames))\n\n print(\"-- transforming data and labels\")\n speech.test_fnames = test_fnames\n\n train_docs = []\n train_analyzedDocument = namedtuple('AnalyzedDocument', 'words tags')\n for i, document in enumerate(speech.train_data):\n words = document\n tags = [\"train_\" + str(i)]\n train_docs.append(train_analyzedDocument(words, tags))\n dev_docs = []\n dev_analyzedDocument = namedtuple('AnalyzedDocument', 'words tags')\n for i, document in enumerate(speech.dev_data):\n words = document\n tags = [\"dev_\" + str(i)]\n dev_docs.append(dev_analyzedDocument(words, tags))\n test_docs = []\n test_analyzedDocument = namedtuple('AnalyzedDocument', 'words tags')\n for i, 
document in enumerate(test_data):\n words = document\n tags = [\"test_\" + str(i)]\n test_docs.append(test_analyzedDocument(words, tags))\n docs = train_docs + dev_docs + test_docs\n model = Doc2Vec(vector_size=vector_size, window=window, min_count=1, workers=8, epochs=epochs, negative=5, sample=1e-4)\n model.build_vocab(docs)\n model.train(docs, total_examples=model.corpus_count, epochs=model.epochs)\n print(model.most_similar('북한'))\n speech.train_doc_vec = np.zeros((4120, vector_size))\n speech.dev_doc_vec = np.zeros((4120, vector_size))\n speech.test_doc_vec = np.zeros((4120, vector_size))\n\n for i in range(4120):\n prefix_train = 'train_' + str(i)\n speech.train_doc_vec[i] = model[prefix_train]\n for i in range(4120):\n prefix_train = 'dev_' + str(i)\n speech.dev_doc_vec[i] = model[prefix_train]\n for i in range(4120):\n prefix_test = 'test_' + str(i)\n speech.test_doc_vec[i] = model[prefix_test]\n from sklearn import preprocessing\n speech.le = preprocessing.LabelEncoder()\n speech.le.fit(speech.train_labels)\n speech.target_labels = speech.le.classes_\n speech.trainy = speech.le.transform(speech.train_labels)\n speech.devy = speech.le.transform(speech.dev_labels)\n return speech", "def read_szf_fmv_13(eps_file):\n data = {}\n metadata = {}\n\n n_lines = eps_file.mdr_counter\n n_node_per_line = eps_file.mdr[\"LONGITUDE_FULL\"].shape[1]\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n # extract metadata\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n metadata[\"state_vector_time\"] = datetime.strptime(\n eps_file.mphr[\"STATE_VECTOR_TIME\"][:-4], \"%Y%m%d%H%M%S\")\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n for f in fields:\n metadata[f] = np.int16(eps_file.mphr[f.upper()])\n\n # extract time\n dt = np.datetime64(\n \"2000-01-01\") + eps_file.mdr[\"UTC_LOCALISATION\"][\"day\"].astype(\n \"timedelta64[D]\"\n ) + eps_file.mdr[\"UTC_LOCALISATION\"][\"time\"].astype(\"timedelta64[ms]\")\n data[\"time\"] = dt[idx_nodes]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\", \"flagfield_rf1\", \"flagfield_rf2\", \"flagfield_pl\",\n \"flagfield_gen1\"\n ]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\"\n ]\n\n # 101 min = 6082 seconds\n # state_vector_time = ascending node crossing time - 1520.5,\n # time crossing at -90 lat\n orbit_start_time = metadata[\"state_vector_time\"] - timedelta(\n seconds=1520.5)\n orbit_end_time = orbit_start_time + timedelta(seconds=6082)\n\n data[\"orbit_nr\"] = np.ma.zeros(\n data[\"time\"].size, dtype=np.int32,\n fill_value=int32_nan) + metadata[\"orbit_start\"]\n data[\"orbit_nr\"][data[\"time\"] > orbit_end_time] += 1\n\n metadata[\"orbits\"] = {}\n for orbit_nr in np.unique(data[\"orbit_nr\"]):\n if orbit_nr == metadata[\"orbit_start\"]:\n metadata[\"orbits\"][orbit_nr] = (orbit_start_time, orbit_end_time)\n else:\n metadata[\"orbits\"][orbit_nr] = (orbit_end_time, orbit_end_time +\n timedelta(seconds=6082))\n\n # extract data\n for f in fields:\n if eps_file.mdr_sfactor[f.upper()] == 1:\n data[f] = eps_file.mdr[f.upper()].flatten()[idx_nodes]\n else:\n data[f] = (eps_file.mdr[f.upper()].flatten() * 1. 
/\n eps_file.mdr_sfactor[f.upper()])[idx_nodes]\n\n data[\"swath_indicator\"] = (data[\"beam_number\"].flatten() > 3).astype(\n np.uint8)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n fields = [(\"longitude_full\", long_nan), (\"latitude_full\", long_nan),\n (\"sigma0_full\", long_nan), (\"inc_angle_full\", uint_nan),\n (\"azi_angle_full\", int_nan), (\"flagfield\", int_nan)]\n\n for f, nan_val in fields:\n data[f] = eps_file.mdr[f.upper()].flatten()\n invalid = eps_file.mdr[f.upper()].flatten() == nan_val\n\n if eps_file.mdr_sfactor[f.upper()] != 1:\n data[f] = data[f] * 1. / eps_file.mdr_sfactor[f.upper()]\n\n data[f][invalid] = nan_val\n\n # modify longitudes from (0, 360) to (-180, 180)\n mask = np.logical_and(data[\"longitude_full\"] != long_nan,\n data[\"longitude_full\"] > 180)\n data[\"longitude_full\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n idx = (data[\"azi_angle_full\"] != int_nan) & (data[\"azi_angle_full\"] < 0)\n data[\"azi_angle_full\"][idx] += 360\n\n # set flags\n data[\"f_usable\"] = set_flags_fmv13(data[\"flagfield\"])\n\n return data, metadata", "def read_PSSM_data(self):\n\n names = os.listdir(self.pssm_path)\n fname = [n for n in names if n.find(self.molname)==0]\n\n if len(fname)>1:\n raise ValueError('Multiple PSSM files found for %s in %s',self.mol_name,self.pssm_path)\n if len(fname)==0:\n raise FileNotFoundError('No PSSM file found for %s in %s',self.mol_name,self.pssm_path)\n else:\n fname = fname[0]\n\n f = open(self.pssm_path + '/' + fname,'rb')\n data = f.readlines()\n f.close()\n raw_data = list( map(lambda x: x.decode('utf-8').split(),data))\n\n self.res_data = np.array(raw_data)[:,:3]\n self.res_data = [ (r[0],int(r[1]),r[2]) for r in self.res_data ]\n self.pssm_data = np.array(raw_data)[:,3:].astype(np.float)", "def read_data(path, batch_size, qp, frac, kernel, model):\n # load h5 file and get dictionaries\n inputs_dict, labels_dict, _ = get_dataset_dict(path, qp)\n\n # create training / validation dictionaries\n block_keys = [k for k in inputs_dict]\n train_inputs_dict, train_labels_dict, val_inputs_dict, val_labels_dict = (dict() for _ in range(4))\n\n # get inputs / labels for block & frac position\n for block in block_keys:\n inputs = inputs_dict[block][frac]\n\n # only use inputs that can be split 80 / 20 train / validation and fill out a batch\n split_percentage = 4/5\n if len(inputs) < batch_size / split_percentage:\n continue\n\n # if model contains non-linear activations, use same input & label size\n inputs = inputs[:, kernel:-kernel, kernel:-kernel, :] if \"scratch\" not in model else inputs\n\n labels = labels_dict[block][frac]\n\n # shuffle the pairs\n inputs, labels = array_shuffle(len(inputs), inputs, labels)\n\n # split 80 / 20\n (train_inputs, train_labels), (val_inputs, val_labels) = split_data(split_percentage, inputs, labels)\n\n # put into correct dictionary entry\n train_inputs_dict[block] = train_inputs\n train_labels_dict[block] = train_labels\n val_inputs_dict[block] = val_inputs\n val_labels_dict[block] = val_labels\n\n return train_inputs_dict, train_labels_dict, val_inputs_dict, val_labels_dict", "def read_data_set():\n # shapes of datasets -- [] means expanded form:\n # - X: J\n # - net.R: J [x J x 1]\n # - F_DIST: J x J x num_features\n # - F_DIST_w1: J x J x num_features\n # - w['except_first'][-1]: (last weights) J x num_features [x 1]\n # - w['except_first'][1:-1]: (second to last weights) J x J x num_features\n # - first weights **were** also J x J x 
num_features\n # - w['first_for_r']: J x 1 x num_features\n\n read_X()\n read_weights(read_FDIST())", "def test_brainvision_data():\n assert_raises(IOError, read_raw_brainvision, vmrk_path)\n assert_raises(ValueError, read_raw_brainvision, vhdr_path, montage,\n preload=True, scale=\"foo\")\n with warnings.catch_warnings(record=True) as w: # event parsing\n raw_py = _test_raw_reader(\n read_raw_brainvision, vhdr_fname=vhdr_path, montage=montage,\n eog=eog)\n assert_true(all('parse triggers that' in str(ww.message) for ww in w))\n assert_true('RawBrainVision' in repr(raw_py))\n\n assert_equal(raw_py.info['highpass'], 0.)\n assert_equal(raw_py.info['lowpass'], 250.)\n\n picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')\n data_py, times_py = raw_py[picks]\n\n # compare with a file that was generated using MNE-C\n raw_bin = Raw(eeg_bin, preload=True)\n picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')\n data_bin, times_bin = raw_bin[picks]\n\n assert_array_almost_equal(data_py, data_bin)\n assert_array_almost_equal(times_py, times_bin)\n\n # Make sure EOG channels are marked correctly\n for ch in raw_py.info['chs']:\n if ch['ch_name'] in eog:\n assert_equal(ch['kind'], FIFF.FIFFV_EOG_CH)\n elif ch['ch_name'] == 'STI 014':\n assert_equal(ch['kind'], FIFF.FIFFV_STIM_CH)\n elif ch['ch_name'] in raw_py.info['ch_names']:\n assert_equal(ch['kind'], FIFF.FIFFV_EEG_CH)\n else:\n raise RuntimeError(\"Unknown Channel: %s\" % ch['ch_name'])\n\n # test loading v2\n read_raw_brainvision(vhdr_v2_path, eog=eog, preload=True,\n response_trig_shift=1000)", "def load_data(m=5000, n=100, path='D:/file/vscode/py/data/mnist.npz'):\r\n f = np.load(path)\r\n x_train, y_train = f['x_train'], f['y_train']\r\n\r\n x_test, y_test = f['x_test'], f['y_test']\r\n\r\n f.close()\r\n return (x_train, y_train), (x_test, y_test)", "def readDataFromFile():\n image_size = 28 # each image is 28x28\n\n num_images = 60000 # there are 60k images\n with gzip.open(r'train-images-idx3-ubyte.gz', 'r') as f: # 60k train & valid\n f.read(16) # reading by 16-byte double\n buffer_Train_Images = f.read(image_size * image_size * num_images)\n f.close()\n data_Train_Images = np.frombuffer(buffer_Train_Images, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n data_Train_Images = data_Train_Images.reshape(num_images,\n image_size * image_size) # Data = 60k x 28 x 28 with 1 value in it\n\n with gzip.open('train-labels-idx1-ubyte.gz', 'r') as f: # 60k train & valid - labels\n f.read(8) # reading by 16-byte double\n buffer_Train_Labels = f.read(num_images)\n data_Train_Labels = np.frombuffer(buffer_Train_Labels, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n\n num_images = 10000 # there are 10k images\n with gzip.open('t10k-images-idx3-ubyte.gz', 'r') as f: # 10k tests\n f.read(16) # reading by 16-byte double\n buffer_Test_Image = f.read(image_size * image_size * num_images)\n data_Test_Image = np.frombuffer(buffer_Test_Image, dtype=np.uint8).astype(\n np.uint8) # translating into 0 to 255\n data_Test_Image = data_Test_Image.reshape(num_images, image_size * image_size) # Data = 60k x 28 x 28 with\n\n with gzip.open('t10k-labels-idx1-ubyte.gz', 'r') as f: # 10k tests - lbles\n f.read(8) # reading by 16-byte double\n buffer_Test_Label = f.read(num_images)\n data_Test_Labels = np.frombuffer(buffer_Test_Label, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n\n return data_Train_Images, data_Train_Labels, data_Test_Image, data_Test_Labels", "def 
readEEGepoch(eegfilename, mainDir):\n # subject = 'ES9007' \n datapath = os.path.join(mainDir)\n os.chdir(datapath)\n \n folders = os.listdir(datapath)\n \n for dir in folders:\n \n os.chdir(os.path.join(datapath, dir))\n file = glob.glob(eegfilename)\n \n if file:\n print('>>>>>>>>>>>>> file loaded from >>>>>>>>>>>>>>>>>:', os.getcwd())\n filepath = os.path.join(os.getcwd(), eegfilename) \n dat = mne.read_epochs(filepath, preload=True) \n break \n return dat", "def _read_vee(filename):\n with open(filename) as f:\n lines = f.readlines()\n for line in lines:\n line = line.strip().split()\n if len(line) <= 2:\n size1, size2 = int(line[0]), int(line[1])\n vee = NP.zeros((size1, size1, size2, size2), dtype=NP.float64)\n elif len(line) == 5:\n mu, nu, lmda, sgma, val = int(line[0]) - 1, int(line[1]) - 1, int(line[2]) - 1, int(line[3]) - 1, NP.float64(line[4])\n vee[mu,nu,lmda,sgma] = \\\n vee[nu,mu,lmda,sgma] = \\\n vee[mu,nu,sgma,lmda] = \\\n vee[nu,mu,sgma,lmda] = \\\n vee[lmda,sgma,mu,nu] = \\\n vee[sgma,lmda,mu,nu] = \\\n vee[lmda,sgma,nu,mu] = \\\n vee[sgma,lmda,nu,mu] = \\\n val\n return vee", "def read_model(input_file):\n with open(input_file) as inp:\n labels = inp.readline().strip().split(\" \")\n init_conc = np.array(list(map(float, inp.readline().strip().split(\" \"))))\n\n stoich = []\n for i in range(len(labels)):\n stoich.append(list(map(float, inp.readline().strip().split(\" \"))))\n S_matrix = np.array(stoich)\n\n educt = []\n for i in range(len(labels)):\n educt.append(list(map(float, inp.readline().strip().split(\" \"))))\n educt_matrix = np.array(educt)\n\n kin_par = np.array(list(map(float, inp.readline().strip().split(\" \"))))\n t_T, t_eval_step = list(map(float, inp.readline().strip().split(\" \")))\n\n return labels, init_conc, S_matrix, educt_matrix, kin_par, t_T, t_eval_step", "def read_szf_fmv_12(eps_file):\n data = {}\n metadata = {}\n\n n_lines = eps_file.mdr_counter\n n_node_per_line = eps_file.mdr[\"LONGITUDE_FULL\"].shape[1]\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n # extract metadata\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n metadata[\"state_vector_time\"] = datetime.strptime(\n eps_file.mphr[\"STATE_VECTOR_TIME\"][:-4], \"%Y%m%d%H%M%S\")\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n for f in fields:\n metadata[f] = np.int16(eps_file.mphr[f.upper()])\n\n # extract time\n dt = np.datetime64(\n \"2000-01-01\") + eps_file.mdr[\"UTC_LOCALISATION\"][\"day\"].astype(\n \"timedelta64[D]\"\n ) + eps_file.mdr[\"UTC_LOCALISATION\"][\"time\"].astype(\"timedelta64[ms]\")\n data[\"time\"] = dt[idx_nodes]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\", \"flagfield_rf1\", \"flagfield_rf2\", \"flagfield_pl\",\n \"flagfield_gen1\"\n ]\n\n # 101 min = 6082 seconds\n # state_vector_time = ascending node crossing time - 1520.5,\n # time crossing at -90 lat\n orbit_start_time = metadata[\"state_vector_time\"] - timedelta(\n seconds=1520.5)\n orbit_end_time = orbit_start_time + timedelta(seconds=6082)\n\n data[\"orbit_nr\"] = np.ma.zeros(\n data[\"time\"].size, dtype=np.int32,\n fill_value=int32_nan) + metadata[\"orbit_start\"]\n data[\"orbit_nr\"][data[\"time\"] > orbit_end_time] += 1\n\n metadata[\"orbits\"] = {}\n for orbit_nr in np.unique(data[\"orbit_nr\"]):\n if orbit_nr == metadata[\"orbit_start\"]:\n 
metadata[\"orbits\"][orbit_nr] = (orbit_start_time, orbit_end_time)\n else:\n metadata[\"orbits\"][orbit_nr] = (orbit_end_time, orbit_end_time +\n timedelta(seconds=6082))\n\n # extract data\n for f in fields:\n if eps_file.mdr_sfactor[f.upper()] == 1:\n data[f] = eps_file.mdr[f.upper()].flatten()[idx_nodes]\n else:\n data[f] = (eps_file.mdr[f.upper()].flatten() * 1. /\n eps_file.mdr_sfactor[f.upper()])[idx_nodes]\n\n data[\"swath_indicator\"] = (data[\"beam_number\"].flatten() > 3).astype(\n np.uint8)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n fields = [(\"longitude_full\", long_nan), (\"latitude_full\", long_nan),\n (\"sigma0_full\", long_nan), (\"inc_angle_full\", uint_nan),\n (\"azi_angle_full\", int_nan), (\"land_frac\", uint_nan),\n (\"flagfield_gen2\", byte_nan)]\n\n for f, nan_val in fields:\n data[f] = eps_file.mdr[f.upper()].flatten()\n invalid = eps_file.mdr[f.upper()].flatten() == nan_val\n\n if eps_file.mdr_sfactor[f.upper()] != 1:\n data[f] = data[f] * 1. / eps_file.mdr_sfactor[f.upper()]\n\n data[f][invalid] = nan_val\n\n # modify longitudes from (0, 360) to (-180, 180)\n mask = np.logical_and(data[\"longitude_full\"] != long_nan,\n data[\"longitude_full\"] > 180)\n data[\"longitude_full\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n idx = (data[\"azi_angle_full\"] != int_nan) & (data[\"azi_angle_full\"] < 0)\n data[\"azi_angle_full\"][idx] += 360\n\n # set flags\n data[\"f_usable\"] = set_flags(data)\n\n return data, metadata", "def read(self) :\n # Open the file.\n f = open(self.output, 'r')\n lines = f.readlines()\n \n # Find the eigenvalue.\n count = 0\n while True :\n words = lines[count].split()\n if len(words) == 5 :\n if words[0] == \"*\" and words[1] == \"K-EFF\":\n self.keff = float(words[3])\n break\n count += 1\n \n # Find the peaking.\n a = 0 # Assembly index\n \n while True :\n words = lines[count].split()\n if len(words) == 8 :\n if words[0] == \"NODE\" and words[1] == \"AVERAGE\" and words[2] == \"POWERS\" :\n count += 5 # Powers start 5 lines below title\n for row in range(0, self.dimension) :\n words = lines[count].split()\n assert(len(words) >= self.dimension)\n for col in range(0, self.dimension) :\n self.peaking_map[row, col] = float(words[col+1])\n if self.core.stencil[row, col] > 0:\n #print \" a=\", a, \" row=\", row, \" col=\", col, len(self.peaking)\n self.peaking[a] = self.peaking_map[row, col]\n a += 1\n count += 1\n break\n count += 1 \n # Maximum peaking.\n self.maxpeak = np.max(self.peaking)", "def load_cleaned_data(self):\n try:\n self.train = pd.read_pickle('../input/train_clean.pkl')\n self.test = pd.read_pickle('../input/test_clean.pkl')\n except FileNotFoundError:\n self.load_raw_data()", "def __init__(self, qmm, config, update_tof, rr_qe, path, meas_len, smearing, lsb=False):\n\n self.qmm = qmm\n self.config = config\n self.rr_qe = rr_qe\n self.num_of_states = 3\n self.path = path\n self.saved_data = None\n self.time_diff = None\n self.update_tof = update_tof\n self.finish_train = 0\n self.mu = dict()\n self.sigma = dict()\n self._load_file(path)\n self.lsb = lsb\n self.meas_len = meas_len\n self.smearing = smearing", "def load_breeze(self, breeze_path):\n self.breeze = pd.read_pickle(os.path.join(self.data_path, 'breeze.pick'))", "def main():\n parser = argparse.ArgumentParser(description=\"Convert a checkpoint file into a support sets and a reconstructor \"\n \"weights files\")\n parser.add_argument('--exp', type=str, required=True, help=\"set experiment's model dir (created by 
`train.py`)\")\n\n # Parse given arguments\n args = parser.parse_args()\n\n # Check structure of `args.exp`\n if not osp.isdir(args.exp):\n raise NotADirectoryError(\"Invalid given directory: {}\".format(args.exp))\n models_dir = osp.join(args.exp, 'models')\n if not osp.isdir(models_dir):\n raise NotADirectoryError(\"Invalid models directory: {}\".format(models_dir))\n checkpoint_file = osp.join(models_dir, 'checkpoint.pt')\n if not osp.isfile(checkpoint_file):\n raise FileNotFoundError(\"Checkpoint file not found: {}\".format(checkpoint_file))\n\n print(\"#. Convert checkpoint file into support sets and reconstructor weight files...\")\n\n # Load checkpoint file\n checkpoint_dict = torch.load(checkpoint_file)\n\n # Get checkpoint iteration\n checkpoint_iter = checkpoint_dict['iter']\n print(\" \\\\__Checkpoint iteration: {}\".format(checkpoint_iter))\n\n # Save support sets weights file\n print(\" \\\\__Save checkpoint support sets weights file...\")\n torch.save(checkpoint_dict['support_sets'], osp.join(models_dir, 'support_sets-{}.pt'.format(checkpoint_iter)))\n\n # Save reconstructor weights file\n print(\" \\\\__Save checkpoint reconstructor weights file...\")\n torch.save(checkpoint_dict['reconstructor'], osp.join(models_dir, 'reconstructor-{}.pt'.format(checkpoint_iter)))", "def load_data():\n\n # Load data\n # You can create this Numpy datafile by running the create_validation_sample.py script\n df = h5py.File(data_fn, \"r\")\n imgs_validation = df[\"imgs_validation\"]\n msks_validation = df[\"msks_validation\"]\n img_indicies = range(len(imgs_validation))\n\n \"\"\"\n OpenVINO uses channels first tensors (NCHW).\n TensorFlow usually does channels last (NHWC).\n So we need to transpose the axes.\n \"\"\"\n input_data = imgs_validation\n msks_data = msks_validation\n return input_data, msks_data, img_indicies", "def load_data(args, path=\"./project_data/\", dataset=\"paper_author.txt\"):\r\n print('Loading {} dataset...'.format(dataset))\r\n\r\n paper_author = make_graph(path+dataset)\r\n print('The number of nodes :', paper_author.number_of_nodes())\r\n adj = nx.adjacency_matrix(paper_author)\r\n\r\n # build symmetric adjacency matrix\r\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\r\n # features = normalize(features)\r\n adj = normalize(adj + sp.eye(adj.shape[0]))\r\n adj = sparse_mx_to_torch_sparse_tensor(adj)\r\n\r\n if args.model == 'adj':\r\n features = adj\r\n\r\n elif args.model == 'node2vec':\r\n print('Already exist Node2vec file')\r\n file_name = './Node2vec_walk_%s_num_walks_%s_truncated.pickle' % (str(args.walk_length), str(args.num_walks))\r\n if os.path.isfile(file_name):\r\n with open(file_name, 'rb') as file:\r\n features = pickle.load(file)\r\n else:\r\n node2vec = Node2Vec(graph=paper_author, # target graph\r\n dimensions=int(args.feature_node), # embedding dimension\r\n walk_length=int(args.walk_length), # number of nodes in each walks\r\n p=2, # return hyper parameter\r\n q=1, # inout parameter, q값을 작게 하면 structural equivalence를 강조하는 형태로 학습됩니다.\r\n weight_key=None, # if weight_key in attrdict\r\n num_walks=int(args.num_walks), \r\n workers=4,\r\n )\r\n features = torch.tensor(node2vec.fit(window=10, min_count=0).wv.vectors)\r\n with open(file_name, 'wb') as file:\r\n pickle.dump(features, file)\r\n return adj, features # , labels, idx_train, idx_val, idx_test\r", "def read_data(path):\n with h5py.File(path, 'r') as hf:\t\n input_ = np.array(hf.get('input'))\n label_ = np.array(hf.get('label'))\n return input_, label_", "def 
read_szx_fmv_11(eps_file):\n raw_data = eps_file.scaled_mdr\n raw_unscaled = eps_file.mdr\n mphr = eps_file.mphr\n\n n_node_per_line = raw_data[\"LONGITUDE\"].shape[1]\n n_lines = raw_data[\"LONGITUDE\"].shape[0]\n n_records = raw_data[\"LONGITUDE\"].size\n\n data = {}\n metadata = {}\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n ascat_time = shortcdstime2jd(raw_data[\"UTC_LINE_NODES\"].flatten()[\"day\"],\n raw_data[\"UTC_LINE_NODES\"].flatten()[\"time\"])\n data[\"jd\"] = ascat_time[idx_nodes]\n\n metadata[\"spacecraft_id\"] = np.int8(mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(mphr[\"ORBIT_START\"])\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n\n for f in fields:\n metadata[f] = np.int16(mphr[f.upper()])\n\n fields = [\"sat_track_azi\"]\n for f in fields:\n data[f] = raw_data[f.upper()].flatten()[idx_nodes]\n\n fields = [(\"longitude\", long_nan), (\"latitude\", long_nan),\n (\"swath_indicator\", byte_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].flatten()\n valid = raw_unscaled[f.upper()].flatten() != nan_val\n data[f][~valid] = nan_val\n\n fields = [(\"sigma0_trip\", long_nan), (\"inc_angle_trip\", uint_nan),\n (\"azi_angle_trip\", int_nan), (\"kp\", uint_nan),\n (\"f_kp\", byte_nan), (\"f_usable\", byte_nan), (\"f_f\", uint_nan),\n (\"f_v\", uint_nan), (\"f_oa\", uint_nan), (\"f_sa\", uint_nan),\n (\"f_tel\", uint_nan), (\"f_land\", uint_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].reshape(n_records, 3)\n valid = raw_unscaled[f.upper()].reshape(n_records, 3) != nan_val\n data[f][~valid] = nan_val\n\n # modify longitudes from (0, 360) to (-180,180)\n mask = np.logical_and(data[\"longitude\"] != long_nan,\n data[\"longitude\"] > 180)\n data[\"longitude\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n mask = (data[\"azi_angle_trip\"] != int_nan) & (data[\"azi_angle_trip\"] < 0)\n data[\"azi_angle_trip\"][mask] += 360\n\n data[\"node_num\"] = np.tile((np.arange(n_node_per_line) + 1),\n n_lines).astype(np.uint8)\n data[\"line_num\"] = idx_nodes.astype(np.uint16)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n return data, metadata", "def data_input(self):\n path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'data'))\n if not os.path.isfile('{0}/{1}.csv'.format(path, self.data_file)):\n print 'Error: Dataset file is not exist.'\n exit()\n # Uplead Dataset.csv file.\n f = open('{0}/{1}.csv'.format(path, self.data_file), 'r')\n print 'Now uploading dataset File.....'\n f = list(f)\n # The Dataset contains heading, number of lines - heading\n self.number_of_VOCs = sum(1 for row in f)-1\n # Count number of columns, last column's value is empty, that is why -1.\n self.number_of_columns = len(f[0].split(',')) -1\n self.first_m_z = int(f[0].split(',')[3]) # find the first m/z value.\n self.last_m_z = int(f[0].split(',')[-2]) # find the last m/z value.\n print 'dataset includes ', self.number_of_VOCs, 'VOCs in all samples '\n print ('dataset includes ', self.number_of_columns, ' Columns, ',\n 'm/z values start from ', self.first_m_z,\n 'and end ', self.last_m_z)\n # Create a matrix with a shape of (number_of_VOCs X number_of_columns) filled with zeros.\n self.dataset = np.zeros((self.number_of_VOCs,\n self.number_of_columns))\n for line in range(1, len(f)):\n if int(float(f[line].strip().split(',')[0])) not in self.loaded_samples:\n 
self.loaded_samples.append(int(float(f[line].strip().split(',')[0])))\n for column in range(self.number_of_columns):\n self.dataset[line-1][column] = int(float(f[line].strip().split(',')[column]))", "def read_data_model(filename='data/data_model.pkl'):\n\n with open(filename, 'r') as pklfile:\n root = pkl.load(pklfile)\n\n return root", "def load_data(from_stored_data=False):\n\n if from_stored_data:\n #data_X = pickle.load(open(file_X, \"rb\"))\n data_X = pickle.load(open(\"x_sparse_small.p\", \"rb\"))\n #data_Y = pickle.load(open(file_Y, \"rb\"))\n data_Y = pickle.load(open(\"y_sparse_small.p\", \"rb\"))\n return data_X, data_Y\n\n data_X = None\n data_Y = None\n\n for num_subject in range(num_subjects):\n print \"subject :\", str(num_subject+1), \" processing started \"\n ind_data_x = None\n ind_data_y = None\n \n subject_data = sio.loadmat(\"data/data-science-P\" + str(num_subject + 1) + \".mat\")\n\n # big three headers\n meta = subject_data.get(\"meta\")\n info = subject_data.get(\"info\")[0]\n trials = subject_data.get(\"data\")\n\n # meta data\n nvoxels = meta[\"nvoxels\"][0][0][0][0]\n colToCoord = meta[\"colToCoord\"][0][0]\n coordToCol = meta[\"coordToCol\"][0][0]\n for num_trial in range(len(trials)):\n sys.stdout.write(str(num_trial)+\" \")\n sys.stdout.flush()\n # create feature vectors\n voxels = trials[num_trial][0][0]\n #feature_vec = np.zeros(dim_x * dim_y * dim_z)\n feature_vec = np.zeros((dim_x_half, dim_y, dim_z))\n for i in range(len(voxels)):\n # save only the left of the voxels to decrease the dimension of the vector \n colInfo = colToCoord[i, :]\n x = colInfo[0] - 1 # index in data starts from 1\n y = colInfo[1] - 1 # same\n z = colInfo[2] - 1 # same\n if x < dim_x_half:\n feature_vec[x][y][z] = voxels[i]\n #feature_vec[z * (dim_x * dim_y) + y * dim_x + x] = voxels[i]\n #feature_vec[z * (dim_x_half * dim_y) + y * dim_x_half + x] = voxels[i]\n feature_vec = feature_vec.flatten()\n feature_vec = sp.csr_matrix(feature_vec)\n\n # create label vectors\n trial_info = info[num_trial]\n cond_number = trial_info[1][0][0] - 2 # starts from 2 (2 ~ 13)\n word_number = trial_info[3][0][0] - 1 # starts from 1 (1 ~ 5)\n label_vec = np.zeros(num_conds * num_words_per_cond)\n label_vec[cond_number * num_words_per_cond + word_number] = 1\n \n # append data\n #data_X = sp.vstack((data_X, feature_vec)) if data_X is not None else feature_vec\n #data_Y = np.vstack((data_Y, label_vec)) if data_Y is not None else label_vec\n ind_data_x = sp.vstack((ind_data_x, feature_vec)) if ind_data_x is not None else feature_vec\n ind_data_y = np.vstack((ind_data_y, label_vec)) if ind_data_y is not None else label_vec\n\n # save ind_data files\n pickle.dump(ind_data_x, open(\"ind_\"+str(num_subject+1)+\"_x\", \"wb\"))\n pickle.dump(ind_data_y, open(\"ind_\"+str(num_subject+1)+\"_y\", \"wb\"))\n\n print \"subject :\", str(num_subject+1), \" processing done \"\n \n # save data file\n #pickle.dump(data_X, open(file_X, \"wb\"))\n #pickle.dump(data_Y, open(file_Y, \"wb\"))\n\n return data_X, data_Y", "def read():\n\n # load json and create model\n base_model = _model_builder.Network(0, model_type=\"load_model\")\n\n #load image and process\n digit = Image.open(\"./data/number.jpg\").convert(\"L\")\n digit = ImageOps.expand(digit,border=60,fill='black')\n digit = digit.resize((28, 28))\n\n #flatten the matrix (for input into MLP network todo:CNN)\n digit_flat = numpy.zeros((1, 784))\n counter = 0\n for j in range(0, 28):\n for i in range(0, 28):\n digit_flat[0][counter] = (digit.getpixel((i, j)))/255.0\n 
counter = counter+1\n\n #predict\n os.system('clear')\n base_model.predict(digit_flat)", "def read_data(self, workfile='workfile_tmp.p'):\n self.data = pickle.load(open(workfile, 'rb'))", "def read_data(self, workfile='workfile_tmp.p'):\n self.data = pickle.load(open(workfile, 'rb'))", "def Subtask4_pre_train_5():\n with open(PATH + 'pre_train_4_Subtask4.txt', encoding='utf-8') as fi:\n evi = eval(fi.read())\n\n train_data = np.load(PATH + 'pre_train_2_Subtask4.npy', allow_pickle=True).item()\n model = word2vec.KeyedVectors.load_word2vec_format(PATH + \"data/GoogleNews-vectors-negative300.bin\", binary=True)\n\n with open(PATH + 'pre_train_3_Subtask4.txt', encoding='utf-8') as f:\n document = eval(f.read())\n\n with open(PATH + 'traindata_Subtask4.txt', 'w') as fp:\n for data in train_data.items():\n claim = data[0]\n claim = re.sub(\"[-,.。:_=+*&^%$#@!?()<>/`';|]\", \"\", claim)\n claim = claim.split(' ')\n claim = list(filter(lambda x: x in model.vocab, claim))\n Vi = []\n for i in range(len(claim)):\n Vi.append(model[claim[i]])\n\n V = np.zeros(len(Vi[0]))\n for i in range(len(claim)):\n for j in range(len(Vi[0])):\n V[j] = V[j] + Vi[i][j]\n\n rms = 0\n for i in range(len(Vi[0])):\n rms += V[i] * V[i]\n rms = np.sqrt(rms / len(Vi[0]))\n\n for i in range(len(Vi[0])):\n V[i] = V[i] / rms\n V = V.astype(str).tolist()\n\n for doc in data[1]:\n lines = document[doc].split('\\n')\n for k in range(len(lines)):\n label = [data[0], doc, k]\n line = document[doc].split('\\n')[k]\n if line != str(k) + '\\t':\n line = line.replace(str(k) + '\\t', '')\n line = line.split('\\t')[0]\n line = re.sub(\"[-,.。:_=+*&^%$#@!?()<>/`';|]\", \"\", line)\n line = line.split(' ')\n line = list(filter(lambda x: x in model.vocab, line))\n if len(line) != 0:\n Vi = []\n for i in range(len(line)):\n Vi.append(model[line[i]])\n\n V1 = np.zeros(len(Vi[0]))\n for i in range(len(line)):\n for j in range(len(Vi[0])):\n V1[j] = V1[j] + Vi[i][j]\n\n rms = 0\n for i in range(len(Vi[0])):\n rms += V1[i] * V1[i]\n rms = np.sqrt(rms / len(Vi[0]))\n\n for i in range(len(Vi[0])):\n V1[i] = V1[i] / rms\n V1 = V1.astype(str).tolist()\n\n if label in evi:\n fp.write(' '.join(V) + ' ' + ' '.join(V1) + ' 1' + '\\n')\n else:\n fp.write(' '.join(V) + ' ' + ' '.join(V1) + ' 0' + '\\n')", "def read_data(path):\n with h5py.File(path, 'r') as hf:\n data = np.array(hf.get('data'))\n return data", "def read_word2vec_model():\n file_name = \"word2vec_model.txt\"\n # these are the pre-2018 lines to load a model:\n # from gensim.models.word2vec import Word2Vec\n # m = Word2Vec.load_word2vec_format(file_name, binary=False)\n \n # here are the post-2018 lines to load a model:\n from gensim.models import KeyedVectors\n print(\"Starting to load the model in \", file_name, \"...\")\n m = KeyedVectors.load_word2vec_format(file_name, binary=False)\n print(\"Model loaded.\\n\")\n\n print(\"The model built is\", m, \"\\n\")\n print(\"m.vocab has\", len(m.vocab), \"words\")\n ## The above line should print\n ## m.vocab has 43981 words\n\n print(\"Each word is a vector of size\", m.vector_size)\n ## which should tells us that each word is represented by a 300-dimensional vector\n\n print(\"\\nTry m.get_vector('hello') to see one...!\\n\")\n ## Once the model is built, it can't be changed without rebuilding it; we'll leave it. 
\n\n return m", "def get_model_data_from_files(self, oc):\r\n # Load model related files\r\n model_path = self.config['DATA_PATH'] + self.config['CUSTOMER_NAME'] + '/models/'\r\n\r\n features_file = model_path + self.task + '_' + str(oc) + '_features.txt'\r\n dummies_file = model_path + self.task + '_' + str(oc) + '_dummies.txt'\r\n model_file = model_path + self.task + '_' + str(oc) + '.joblib'\r\n\r\n if os.path.isfile(features_file) and os.path.isfile(dummies_file) and os.path.isfile(model_file):\r\n model = joblib.load(model_file)\r\n features = open(features_file, 'r', encoding=self.config['DATA_ENCODING']).read().rstrip('\\n').split(self.config['DATA_SEPARATOR'])\r\n dummies = open(dummies_file, 'r', encoding=self.config['DATA_ENCODING']).read().rstrip('\\n').split(self.config['DATA_SEPARATOR'])\r\n return (model, features, dummies)\r\n return (None, None, None)", "def read_data_test(path):\n with h5py.File(path, 'r') as hf:\n input_ = np.array(hf.get('data'))\n label_ = np.array(hf.get('label'))\n\t\n return input_, label_", "def read_nc(self, fname=None):\n fname = fname if fname else self.fname\n\n super(EigFile, self).read_nc(fname)\n\n with nc.Dataset(fname, 'r') as root:\n\n # nspin, nkpt, nband\n self.EIG = root.variables['Eigenvalues'][:,:,:] \n\n # nkpt, 3\n self.Kptns = root.variables['Kptns'][:,:]", "def _load_model(self):\n self._load_scaler('scaler.save')\n self._load_encoder('encoder0.save', 0)\n self._load_encoder('encoder1.save', 1)\n self._load_neural_network('model.json', 'model.h5')\n return", "def model_input(step=0.1):\n model = np.loadtxt('start_model.dat',dtype={'names': ('H', 'VP','VS','RHO','QP','QS',\\\n 'ETAP','ETAS','FREFP','FREFS'),'formats': ('f4', 'f4','f4','f4',\\\n 'f4','f4','f4','f4','f4','f4')}, skiprows=1)\n \n f = open('model96_input.tmp', 'w+')\n f.write('model_step96.in\\nIsotropic model\\n0\\n')\n d = np.loadtxt('cumul_depths.tmp')\n for i in np.arange(len(d)):\n for k, s in enumerate(np.arange(0,d[-1],step)):\n if s < d[i] and i==0:\n f.write('%s %s %s %s %s %s %s %s %s %s\\n'\n %(step, model['VP'][i], model['VS'][i], model['RHO'][i],\\\n model['QP'][i], model['QS'][i], model['ETAP'][i], model['ETAS'][i],\\\n model['FREFP'][i], model['FREFS'][i]))\n if i > 0:\n if s < d[i] and s > d[i-1]:\n f.write('%s %s %s %s %s %s %s %s %s %s\\n'\n %(step, model['VP'][i], model['VS'][i], model['RHO'][i],\\\n model['QP'][i], model['QS'][i], model['ETAP'][i], model['ETAS'][i],\\\n model['FREFP'][i], model['FREFS'][i]))\n f.close()\n os.system(\"mkmod96 < model96_input.tmp\")\n print (\">> Model_step96.in is ready... 
next step is comp_disp.bash\")\n print ('>> nlayers =', k)\n print (\">> Change nlayer in eigenfucntion_*.bash!!!!\")\n print (\">> mkmod96\")", "def main():\n\n # set up output directory and file\n output_file_folder = \"output/{}\".format(args.experiment_name)\n Path(output_file_folder).mkdir(parents=True, exist_ok=True)\n args.output_file_name = \"{}/{}.csv\".format(output_file_folder, args.model_name)\n args.checkpoint_name = \"{}/{}.pt\".format(output_file_folder, args.model_name + \"_best_model\")\n\n # read lcquad merged data\n if args.dataset_name == \"lcquad\":\n df_train = pd.read_csv(\"./data/lcquad/gold_db/train_gold.csv\")\n df_valid = pd.read_csv(\"./data/lcquad/gold_db/valid_gold.csv\")\n df_test = pd.read_csv(\"./data/lcquad/gold_db/lcquad_test_sorted.csv\")\n args.gold_file_name = \"lcquad/lcquad_gt_5000.csv\"\n # elif args.dataset_name == \"qald9\":\n # df_train = pd.read_csv(\"./data/qald-9/train_gold.csv\")\n # df_valid = pd.read_csv(\"./data/qald-9/valid_gold.csv\")\n # df_test = pd.read_csv(\"./data/qald-9/test_gold.csv\")\n # args.gold_file_name = \"qald/qald_data_gt.csv\"\n # elif args.dataset_name == \"webqsp\":\n # df_train = pd.read_csv(\"./data/webqsp/train_gold.csv\")\n # df_valid = pd.read_csv(\"./data/webqsp/valid_gold.csv\")\n # df_test = pd.read_csv(\"./data/webqsp/test_gold.csv\")\n # args.gold_file_name = \"webqsp/webqsp_data_gt.csv\"\n\n train_data = read_data_file(df_train, device, \"train\")\n valid_data = read_data_file(df_valid, device, \"valid\")\n test_data = read_data_file(df_test, device, \"test\")\n\n # train model and evaluate\n if args.model_name == \"pure\":\n model = PureNameLNN(args.alpha, -1, False)\n elif args.model_name == \"ctx\":\n model = None\n elif args.model_name == 'type':\n model = None\n elif args.model_name == \"pure_ctx\":\n model = None\n elif args.model_name == \"pure_type\":\n model = None\n elif args.model_name == \"ctx_type\":\n model = None\n\n model = model.to(device)\n print(\"model: \", args.model_name, args.alpha)\n\n # training\n train(model, train_data, valid_data, test_data, args.checkpoint_name, args.num_epoch, args.margin, args.learning_rate)", "def train (X, Y, modelfile='Q2/models/gaussianBinary.model', gamma=0.05, showProgress=False):\n tick = time.time()\n X = np.matrix(X)\n Y = np.matrix(Y).T\n\n m, n = X.shape\n\n # Find the Kernel Matrix KM\n KM = gaussianKM (X, X, gamma)\n\n # Parameters for CVXOPT\n YQ = Y * Y.T\n Q = np.multiply (YQ, KM)\n p = np.matrix(-np.ones((m, 1)))\n G = np.matrix(np.vstack( (-np.identity(m), np.identity(m)) ))\n h = np.matrix(np.vstack( (np.zeros((m,1)), np.ones((m,1))) ))\n A = Y.T\n b = 0\n \n # Running CVXOPT\n Q = cvx.matrix(Q)\n p = cvx.matrix(p)\n G = cvx.matrix(G)\n h = cvx.matrix(h)\n A = cvx.matrix(A, (1, m), 'd')\n b = cvx.matrix(b, (1,1), 'd')\n cvx.solvers.options['show_progress'] = showProgress\n sol = cvx.solvers.qp(P=Q, q=p, G=G, h=h, A=A, b=b)\n\n # Alphas\n alphas = np.matrix(sol['x'])\n\n # Finding the bias\n def findBias ():\n epsilon = 1e-5\n for idx, alp in enumerate(alphas):\n if (alp - 0 > epsilon and 1 - alp > epsilon):\n KM = gaussianKM (X[idx], X[idx], gamma)\n AlphaY = np.multiply (alphas, Y)\n AlphaY = np.repeat(AlphaY, 1, axis=1)\n KMalphaY = np.multiply (KM, AlphaY)\n KMalphaY = np.sum(KMalphaY, axis=0)\n b = float (Y[idx, 0] - KMalphaY)\n return b\n \n b = findBias ()\n\n # Finding the support vectors\n if (showProgress):\n epsilon = 1e-5\n sv = []\n for idx, alp in enumerate(alphas):\n if (alp - 0 > epsilon and 1 - alp > epsilon):\n 
sv.append(alp)\n with open('Q2/support-vectors/gaussian.vectors', 'w') as f:\n for v in sv:\n f.write(\"%.3f\\n\" % v)\n print (\"Number of Support Vectors: \", len(sv))\n # else:\n\n epsilon = 1e-5\n nSV = np.sum( (np.array(alphas) > epsilon) & (np.array(alphas) < 1 - epsilon), axis=0)\n print (\"Number of Support Vectors: \", nSV)\n\n # Saving the model\n model = (alphas, b)\n with open(modelfile, 'wb') as handle:\n pickle.dump(model, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n print (\"Time taken for gaussian CVXOPT training: \", time.time() - tick)", "def read_mb_file(self,idir='.',ifile=None, gmt=True, verbose=False):\n \n import numpy as np\n import os\n \n if gmt==True:\n gmt_file=idir+'/../maps/en_velo.gmt'\n if isinstance(gmt,str):\n gmt_file=gmt\n \n if gmt != False:\n self.read_lon_lat(gmt_file,verbose=verbose)\n \n if ifile is None:\n mb_file_basename= idir + '/mb_'+self.code+'_GPS.dat'\n else:\n mb_file_basename=ifile\n \n data_NEU = []\n for i in range(1,4):\n mb_file = mb_file_basename + str(i)\n\n # file\n self.ifile=os.path.abspath(mb_file)\n \n data=np.genfromtxt(mb_file,skip_header=4)\n \n # reshape to ensure a 2D array\n if len(data.shape)==1:\n data=data.reshape((1,data.shape[0]))\n \n\n\n data_NEU.append(data)\n\n if data_NEU[0].shape == data_NEU[1].shape == data_NEU[2].shape:\n self.data=np.zeros((data_NEU[0].shape[0],7))\n self.data[:,0]=data_NEU[0][:,0]\n self.data[:,1]=data_NEU[0][:,1]#*to_mm\n self.data[:,2]=data_NEU[1][:,1]#*to_mm\n self.data[:,3]=data_NEU[2][:,1]#*to_mm\n\n self.data[:,4]=data_NEU[0][:,2]#*to_mm\n self.data[:,5]=data_NEU[1][:,2]#*to_mm\n self.data[:,6]=data_NEU[2][:,2]#*to_mm\n\n else: \n print(\"!!! Error reading \",mb_file_basename,\" :*dat1, *dat2, *dat3 do not have the same length\")\n self.data = None", "def _load_dataset(self, path):\n\t\twhile True:\n\t\t\t\n\t\t\ttry:\n\t\t\t\tX_train = np.load(\"data/X_train.npy\")\n\t\t\t\tX_val = np.load(\"data/X_val.npy\")\n\t\t\t\tY_train = np.load(\"data/Y_train.npy\")\n\t\t\t\tY_val = np.load(\"data/Y_val.npy\")\n\t\t\t\tbreak\n\n\t\t\texcept FileNotFoundError:\n\n\t\t\t\tdata_temp = np.zeros((50000,64,64,3))\n\t\t\t\tlabel_temp = []\n\n\t\t\t\tfor i in range(5):\n\n\t\t\t\t\tfile = path + str(i+1)\n\t\t\t\t\twith open(file, 'rb') as fo:\n\t\t\t\t\t\ttemp_element = pickle.load(fo, encoding='bytes')\n\n\t\t\t\t\ttemp_data = temp_element[b'data']\n\t\t\t\t\tlabel_temp.extend(temp_element[b'labels'])\n\n\t\t\t\t\tfor j in range(10000):\n\t\t\t\t\t\tdata_temp[j+(i*10000)] = self._reshape(temp_data[j])\n\n\t\t\t\tlabel_temp = np.eye(10)[np.array(label_temp)]\n\n\t\t\t\tnp.random.seed(123)\n\t\t\t\tpermutations = list(np.random.permutation(50000))\n\t\t\t\tX = data_temp[permutations, :, : , :] \n\t\t\t\tY = label_temp[permutations, :]\n\t\t\t\tX_train = X[0:40000, :, :, :] \n\t\t\t\tY_train = Y[0:40000, :]\n\t\t\t\tX_val = X[40000:50000, :, :, :] \n\t\t\t\tY_val = Y[40000:50000, :]\n\n\t\t\t\tnp.save(\"./data/X_train\", X_train)\n\t\t\t\tnp.save(\"./data/X_val\", X_val)\n\t\t\t\tnp.save(\"./data/Y_train\", Y_train)\n\t\t\t\tnp.save(\"./data/Y_val\", Y_val)\n\t\t\t\tbreak\n\n\t\treturn X_train, X_val, Y_train, Y_val", "def load_model(self):\n saved_path = self.config.path_tmp / self.model.model_name\n if saved_path.exists():\n self.model.load_weights(str(saved_path / 'model.vec'))", "def load(self, model_path):\n # TODO: include new params based on ConfigEnum\n checkpoint = torch.load(model_path)\n\n self.image_size = checkpoint['image_size']\n self.device = checkpoint['device']\n self.fp16 = 
checkpoint['fp16']\n self.accumulate_grad_steps = checkpoint['accumulate_grad_steps']\n self.experiment_id = checkpoint['experiment_id']\n self.experiment_tag = checkpoint['experiment_tag']\n self.seed = checkpoint['seed']\n self.train_batch_size = checkpoint['train_batch_size']\n self.valid_batch_size = checkpoint['valid_batch_size']\n self.test_batch_size = checkpoint['test_batch_size']\n self.dataloader_num_workers = checkpoint['dataloader_num_workers']\n self.train_dataloader_shuffle = checkpoint['train_dataloader_shuffle']\n self.optimizer_type = checkpoint['optimizer_type']\n self.optimizer_params = checkpoint['optimizer_params']\n self.scheduler_type = checkpoint['scheduler_type']\n self.scheduler_params = checkpoint['scheduler_params']\n self.step_scheduler_after = checkpoint['step_scheduler_after']\n self.step_scheduler_metric = checkpoint['step_scheduler_metric']\n self.compute_train_loss_after = checkpoint['compute_train_loss_after']\n self.compute_train_metric_after = checkpoint['compute_train_metric_after']\n self.compute_valid_loss_after = checkpoint['compute_valid_loss_after']\n self.compute_valid_metric_after = checkpoint['compute_valid_metric_after']\n self.training_stopping_criteria = checkpoint['training_stopping_criteria']\n self.stopping_criteria_params = checkpoint['stopping_criteria_params']\n self.max_epoch = checkpoint['max_epoch']\n self.train_on_all_data = checkpoint['train_on_all_data']\n self.validate_after = checkpoint['validate_after']\n self.validation_steps = checkpoint['validation_steps']\n self.run_lr_range_test= checkpoint['run_lr_range_test']\n self.sleep_in_epochs = checkpoint['sleep_in_epochs']\n self.sleep_time = checkpoint['sleep_time']\n self.checkpoint_epochs = checkpoint['checkpoint_epochs']\n\n self._best_score = checkpoint['_best_score']\n self._current_score = checkpoint['_current_score']\n self._counter = checkpoint['_counter']\n self.metrics = checkpoint['metrics']\n self.current_epoch = checkpoint['current_epoch']\n self.current_train_batch = checkpoint['current_train_batch']\n self.current_valid_batch = checkpoint['current_valid_batch']\n self.num_train_samples = checkpoint['num_train_samples']\n self.num_train_iterations = checkpoint['num_train_iterations']\n self.checkpoint_snapshot = checkpoint['checkpoint_snapshot'] \n\n # initialize optimizer, scheduler, and gradient scaler\n self.configure_optimizers()\n self.configure_schedulers()\n \n if self.fp16:\n self.scaler = torch.cuda.amp.GradScaler()\n\n if self.model:\n self.model.load_state_dict(checkpoint['state_dict'])\n self.model.to(self.device)\n\n if self.optimizer:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n if self.scheduler:\n self.scheduler.load_state_dict(checkpoint['scheduler'])\n\n #if self.scaler:\n # self.scaler.load_state_dict(checkpoint['scaler'])", "def _load_training_data(self):\n self._save_training_data()", "def read_szx_fmv_13(eps_file):\n raw_data = eps_file.scaled_mdr\n raw_unscaled = eps_file.mdr\n mphr = eps_file.mphr\n\n n_node_per_line = raw_data[\"LONGITUDE\"].shape[1]\n n_lines = raw_data[\"LONGITUDE\"].shape[0]\n n_records = raw_data[\"LONGITUDE\"].size\n\n data = {}\n metadata = {}\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n ascat_time = shortcdstime2jd(raw_data[\"UTC_LINE_NODES\"].flatten()[\"day\"],\n raw_data[\"UTC_LINE_NODES\"].flatten()[\"time\"])\n data[\"jd\"] = ascat_time[idx_nodes]\n\n metadata[\"spacecraft_id\"] = np.int8(mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = 
np.uint32(mphr[\"ORBIT_START\"])\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n\n for f in fields:\n metadata[f] = np.int16(mphr[f.upper()])\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"abs_line_number\"\n ]\n\n for f in fields:\n data[f] = raw_data[f.upper()].flatten()[idx_nodes]\n\n fields = [(\"longitude\", long_nan), (\"latitude\", long_nan),\n (\"swath indicator\", byte_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].flatten()\n valid = raw_unscaled[f.upper()].flatten() != nan_val\n data[f][~valid] = nan_val\n\n fields = [(\"sigma0_trip\", long_nan), (\"inc_angle_trip\", uint_nan),\n (\"azi_angle_trip\", int_nan), (\"kp\", uint_nan),\n (\"num_val_trip\", ulong_nan), (\"f_kp\", byte_nan),\n (\"f_usable\", byte_nan), (\"land_frac\", uint_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].reshape(n_records, 3)\n valid = raw_unscaled[f.upper()].reshape(n_records, 3) != nan_val\n data[f][~valid] = nan_val\n\n # modify longitudes from (0, 360) to (-180,180)\n mask = np.logical_and(data[\"longitude\"] != long_nan,\n data[\"longitude\"] > 180)\n data[\"longitude\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n mask = (data[\"azi_angle_trip\"] != int_nan) & (data[\"azi_angle_trip\"] < 0)\n data[\"azi_angle_trip\"][mask] += 360\n\n data[\"node_num\"] = np.tile((np.arange(n_node_per_line) + 1),\n n_lines).astype(np.uint8)\n\n data[\"line_num\"] = idx_nodes.astype(np.uint16)\n\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n data[\"swath_indicator\"] = data.pop(\"swath indicator\")\n\n data[\"f_land\"] = data.pop(\"land_frac\")\n\n return data, metadata", "def execute(cf):\n\n ##Ports and parameters\n train_set = cf.get_input(\"train_set\") #training set. Typically even_file\n test_set = cf.get_input(\"test_set\") #test set. 
Typically odd_file\n WM1 = cf.get_input(\"WM1\")\n WM2 = cf.get_input(\"WM2\")\n WM3 = cf.get_input(\"WM3\")\n WM4 = cf.get_input(\"WM4\")\n WM5 = cf.get_input(\"WM5\")\n WM6 = cf.get_input(\"WM6\")\n WM7 = cf.get_input(\"WM7\")\n WM8 = cf.get_input(\"WM8\")\n WM9 = cf.get_input(\"WM9\")\n WM10 = cf.get_input(\"WM10\")\n WM11 = cf.get_input(\"WM11\")\n WM12 = cf.get_input(\"WM12\")\n WM13 = cf.get_input(\"WM13\")\n WM14 = cf.get_input(\"WM14\")\n WM15 = cf.get_input(\"WM15\")\n WM16 = cf.get_input(\"WM16\")\n WM17 = cf.get_input(\"WM17\")\n WM18 = cf.get_input(\"WM18\")\n WM19 = cf.get_input(\"WM19\")\n WM20 = cf.get_input(\"WM20\")\n WMdir = cf.get_input(\"WMdir\")\n WMdir2 = cf.get_input(\"WMdir2\")\n basefreqs = cf.get_input(\"BaseFrequencies\")\n ufemodel_path = cf.get_input(\"UFEmodel\")\n\n bestWM = cf.get_output(\"BestWM\")\n log_file = cf.get_output(\"log_file\")\n interm = cf.get_output(\"intermediate\")\n\n genome = cf.get_parameter('genome', 'string')\n motevo_path = cf.get_parameter('motevo_path', 'string')\n aligned = cf.get_parameter(\"aligned\", \"boolean\")\n\n os.mkdir(interm)\n\n\n\n # Read stuff in\n WMs = [i for i in[WM1, WM2, WM3, WM4, WM5, WM6, WM7, WM8, WM9, WM10, WM11, WM12, WM13, WM14, WM15, WM16, WM17, WM18, WM19, WM20] if i]\n\n if WMdir:\n WMs += [os.path.join(WMdir, wm) for wm in os.listdir(WMdir)]\n\n if WMdir2:\n WMs += [os.path.join(WMdir2, wm) for wm in os.listdir(WMdir2)]\n\n f = open(basefreqs)\n ATfreq = float(f.readline().strip().split()[1])\n GCfreq = float(f.readline().strip().split()[1])\n f.close()\n\n\n # Compute stuff: optimal priors and then likelihood of test set\n optpriors = []\n logliks = []\n\n for i, WM in enumerate(WMs):\n\n wmlen = len(open(WM).readlines())-4\n\n # 1. Fit prior on training set with EM\n tag = 'fitP_%i' %(i+1)\n params, sites, priors, loglikfile = giveMotevoParamFile(genome, wmlen, interm, tag, aligned, ufemodel_path, ATfreq, GCfreq, emprior=1, bgorder=0, bgprior=0.99)\n r = runMotevo(motevo_path, train_set, params, WM, interm, tag)\n if r != 0:\n print 'motevo failed ', tag\n sys.exit(1)\n\n # prior file:\n # WM_name final_prior nr_of_sites density\n # /import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/PipeLineSource/TESTRUN/NRF1_Z2/OUTPUT/NRF1_FgBg-runmotevoPG2_1/Logo 0.016554 635.008 0.251863\n # background 0.983446 37724.8 0.748137\n # UFEwm 0 0 0\n\n optprior = float(open(priors).readlines()[1].split()[1])\n bgprior=(1-optprior)\n print bgprior\n\n # 2. 
Compute log-likelihood on test set with optimal prior from training set and without EM\n tag = 'compLL_%i' %(i+1)\n params, sites, priors, loglikfile = giveMotevoParamFile(genome, wmlen, interm, tag, aligned, ufemodel_path, ATfreq, GCfreq, emprior=0, bgorder=0, bgprior=bgprior)\n runMotevo(motevo_path, train_set, params, WM, interm, tag)\n\n a = loadtxt(loglikfile, usecols=[1])\n ll = sum(a)\n\n logliks.append(ll)\n optpriors.append(optprior)\n\n print logliks\n\n\n\n #replace name in WM file with bestWM\n lines = open(WMs[argmax(logliks)]).readlines()\n lines[1] = 'NA BestWM\\n'\n bwm = open(bestWM, 'w')\n bwm.write(''.join(lines))\n\n\n l = open(log_file, 'w')\n\n l.write('WM_name\\tWM_path\\tlog_likelihood\\topt_prior\\n')\n\n names = ['WM_%i\\t%s\\t%.4f\\t%s' %(i+1, WMs[i], logliks[i], optpriors[i]) for i in arange(len(WMs))]\n\n l.write('\\n'.join(names))\n l.close()\n\n\n return 0", "def load(self):\n results = pd.read_csv(f'data/binomial_experiment_{self.margin}_C_{self.C}.csv')\n\n self.minimal_R = np.zeros((len(self.Ms), self.estimations, len(self.epsilons)))\n for i, M in enumerate(self.Ms):\n for l in range(self.estimations):\n self.minimal_R[i,l,:] = np.array(results[results['M'] == M])[l,2:]\n return", "def test_large_flmb(self):\n test_files_218 = glob.glob(INPUT_FLMB_PATH + '/unit_363-2013-218*.mdd')\n\n mdd.procall(test_files_218)\n\n data_orig = self.read_full_file('node59p1.dat')\n\n # two status files from different controllers, 12371 and 12365\n data_out_71 = self.read_full_file('node59p1_0.status_1237101.dat')\n self.check_sio_type(data_out_71, ['CS', 'PS'])\n data_out_65 = self.read_full_file('node59p1_0.status_1236501.dat')\n self.check_sio_type(data_out_65, ['CS', 'PS'])\n data_out = data_out_71\n data_out += data_out_65\n\n data_adcps = self.read_full_file('node59p1_0.adcps_1237111.dat')\n self.check_sio_type(data_adcps, ['AD'])\n data_out += data_adcps\n\n data_ctdmo = self.read_full_file('node59p1_0.ctdmo_1237100.dat')\n self.check_sio_type(data_ctdmo, ['CT', 'CO'])\n data_out += data_ctdmo\n\n data_dosta = self.read_full_file('node59p1_0.dosta_1236501.dat')\n self.check_sio_type(data_dosta, ['DO'])\n data_out += data_dosta\n\n data_flort = self.read_full_file('node59p1_0.flort_1236501.dat')\n self.check_sio_type(data_flort, ['FL'])\n data_out += data_flort\n\n data_phsen = self.read_full_file('node59p1_0.phsen_1236501.dat')\n self.check_sio_type(data_phsen, ['PH'])\n data_out += data_phsen\n\n if not TestSioUnpack.compare_sio_matches(data_orig, data_out):\n self.fail(\"Failed sio block compare\")\n\n test_files = glob.glob(INPUT_FLMB_PATH + '/unit_363-2013-205*.mdd')\n test_files_217 = glob.glob(INPUT_FLMB_PATH + '/unit_363-2013-217*.mdd')\n test_files_219 = glob.glob(INPUT_FLMB_PATH + '/unit_363-2013-219*.mdd')\n\n test_files.extend(test_files_217)\n test_files.extend(test_files_219)\n\n mdd.procall(test_files)\n\n data_out = self.compare_node59(1, data_out)\n\n test_files = glob.glob(INPUT_FLMB_PATH + '/unit_363-2013-233*.mdd')\n test_files_231 = glob.glob(INPUT_FLMB_PATH + '/unit_363-2013-231*.mdd')\n\n test_files.extend(test_files_231)\n\n mdd.procall(test_files)\n\n self.compare_node59(2, data_out)", "def load_from_disk(self, file_name = \"vehicle_classifier.pkl\"):\n self.classifier.load_from_disk(file_name)", "def _load_model(self):\n with open(self.filepath, 'rb') as file:\n self.cmodel = pickle.load(file)", "def read_file(self, filename=None):\n print(f'reading file')\n\n if filename is None:\n filename = self.model_file\n\n with open(filename, 'r') as 
f:\n # count number of lines\n npts_file = sum([1 for line in f])\n\n # go back to start and read second line in file to get number of variables\n f.seek(0)\n f.readline()\n l = f.readline()\n nvars_file = int(l.split(' ')[-1])\n\n # subtract header rows\n npts_file -= (nvars_file + 2)\n\n print(f'{nvars_file} variables found in the initial model file')\n print(f'{npts_file} points found in the initial model file')\n\n var_idx_map = {}\n\n # read in the names of the variables\n for i in range(nvars_file):\n var_name_file = f.readline().strip()\n if var_name_file.lower() == 'n':\n var_name_file = 'neut'\n elif var_name_file == 'p':\n var_name_file = 'prot'\n\n # create map of file indices to model indices\n try:\n var_idx_map[self.idx[var_name_file]] = i+1\n except KeyError:\n pass\n\n base_r = np.zeros(npts_file)\n base_state = np.zeros((npts_file, self.nvar))\n\n # read in model data\n for i, line in enumerate(f):\n variables = [float(v) for v in line.split(' ')]\n\n base_r[i] = variables[2]\n\n for j in range(self.nvar):\n if j in var_idx_map:\n base_state[i, j] = variables[var_idx_map[j]]\n\n return npts_file, base_r, base_state", "def load_e_form():\n path = os.path.join(DATA_DIR, \"eform-materialsproject-85014.csv\")\n df = pd.read_csv(path, index_col=\"mpid\")\n return df", "def main(workdir):\n dir = os.path.expanduser(workdir)\n \n #read the .dat file\n f = open('{}smi.dat'.format(dir))\n par = imp.load_source('par', '', f)\n \n #make a sdf file for visualization\n output = pybel.Outputfile(\"sdf\", dir + \"species.sdf\",overwrite=True)\n for name in par.smiles:\n smi = par.smiles[name]\n obmol = pybel.readstring(\"smi\",smi)\n output.write(obmol)\n output.close()\n \n #list with the jobs that need to be done\n jobs = []\n \n #iterate the input files\n for name in par.smiles:\n #name = input_file.replace('.inp','') #name of the calculation\n test_dir = dir + name #location where the calculations will be done\n if not os.path.exists(test_dir):\n os.mkdir(test_dir)\n \n #copy the input file to the working directory\n write_input_file(par,name,par.smiles[name],test_dir + '/input.inp')\n job = workdir + name + '/'\n jobs.append(job)\n \n run_threads(jobs, 'eric', max_running = 3)", "def unpack(self, buff, verbose=0):\n\n\n # See https://docs.python.org/3/library/struct.html#struct.pack\n # for struck pack format\n\n # Local methods to unpack numbers in little-endian format\n idx={'x':0}\n\n def read_uint8():\n idx['x']+=1\n return struct.unpack('<B', buf[idx['x']-1:idx['x']])[0]\n def read_uint32():\n idx['x']+=4\n return struct.unpack('<I', buf[idx['x']-4:idx['x']])[0]\n def read_float32():\n idx['x']+=4\n return struct.unpack('<f', buf[idx['x']-4:idx['x']])[0]\n\n # Return empty model in case the byte-array contains no information\n if len(buf) == 0:\n return None\n\n # Read global stddev and mean (not used in RQRMI version 1.1)\n _=read_float32()\n _=read_float32()\n\n num_of_stages=read_uint32()\n _log(verbose, 'Num of stages: %d' % num_of_stages)\n\n # Preallocate array\n trained_rqrmi=[None for _ in range(num_of_stages)]\n\n for s in range(num_of_stages):\n\n # Read the current stage\n num_of_models=read_uint32()\n\n _log(verbose, '\\nStage %d num of models: %d' % (s, num_of_models))\n\n # Preallocate net_list\n net_list=[None for _ in range(num_of_models)]\n\n for m in range(num_of_models):\n # Read version\n version=read_uint8()\n if version==0:\n _log(verbose, '\\nSkipping model <%d,%d>: model not compiled' % (s, m))\n continue\n elif version!=2:\n _log(verbose, 
'\\nUnsupported version for model <%d,%d>' % (s, m))\n continue\n\n _log(verbose, '\\nLoading model <%d, %d>: ' % (s,m))\n\n # Read model parameters\n mu=read_float32()\n sig=read_float32()\n fac=read_float32()\n omin=read_float32()\n num_of_layers=read_uint32()\n _log(verbose, 'layers: %d, ' % num_of_layers)\n\n # Preallocate net values\n net_values=[None for _ in range(2*num_of_layers-1)]\n\n # Read network structure\n structure=[None for _ in range(num_of_layers)]\n for l in range(num_of_layers):\n structure[l]=read_uint32()\n\n # Layer 0 bias\n net_values[0]=np.empty(structure[0])\n\n # Preallocate all other layers\n for l in range(1, num_of_layers):\n net_values[2*l-1]=np.empty(structure[l]) # Layer bias\n net_values[2*l-0]=np.empty([structure[l-1], structure[l]]) # Layer weights\n\n _log(verbose, 'structure: [%s]' % ','.join([str(x) for x in structure]))\n\n # Read values of first layer\n net_values[0][0]=read_float32()\n _=read_float32() # First layer weight is one (always)\n\n # Read values\n for l in range(1, num_of_layers):\n # Read bias\n for i in range(structure[l]):\n net_values[2*l-1][i]=read_float32()\n # Read weights\n for y in range(structure[l-1]):\n for x in range(structure[l]):\n net_values[2*l][y,x]=read_float32()\n\n # Update stage's net list\n net_list[m]=(mu, sig, fac, omin, net_values)\n\n # Update output with stage\n trained_rqrmi[s] = net_list\n\n # Read the maximum error of each last stage submodel\n self.error_list = []\n for e in range(len(self.trained_rqrmi[-1])):\n self.error_list.append(read_uint32())\n\n _log(verbose, '\\n')\n self.trained_rqrmi = trained_rqrmi", "def load_model(self, path):\n self._saver.restore(self._sess, path + '/model.ckp')\n pkl_file = open(path + '/som.pkl', 'rb')\n restored = pickle.load(pkl_file)\n pkl_file.close()\n self._m = restored['_m']\n self._n = restored['_n']\n self._neighbourhood = restored['_neighbourhood']\n # self._topography = restored['_topography']\n self._num_iterations = restored['_num_iterations']\n self._Wts = restored['_Wts']\n self._locations = restored['_locations']\n self._learned = restored['_learned']\n self._centroid_grid = restored['_centroid_grid']\n self.abnormal_dist = restored['abnormal_dist']\n\n print(\"Model restored from path: \" + path)", "def learn(filePath):\n filename = filePath.stem\n processedJAFFE = load(str(filePath))\n processedDF = pd.DataFrame(processedJAFFE)\n processedDF.columns = ['name', 'data', 'emotion']\n processedDF = processedDF.sort_values(by=['name', 'emotion'])\n grouped = processedDF.groupby(['name', 'emotion'])\n train = grouped.nth([0, 1])\n test = grouped.nth([2, 3, 4])\n\n yTrain = train.index.get_level_values(1).tolist()\n xTrain = train.values.ravel().tolist()\n yTest = test.index.get_level_values(1).tolist()\n xTest = test.values.ravel().tolist()\n\n parameters = {\n 'C': [\n 1.00E-12, 1.00E-11, 1.00E-10, 1.00E-09, 1.00E-08, 1.00E-07, 1.00E-06,\n 1.00E-05, 1.00E-04, 1.00E-03, 2.00E-03, 1.00E-02, 1.00E-01, 1.00,\n 1.00E+01, 1.00E+02, 1.00E+03, 1.00E+04, 1.00E+05\n ],\n 'gamma': [\n 1.00E00,\n 1.00E-01,\n 1.00E-02,\n 1.00E-03,\n 5.00E-04, 2.00E-04, 1.50E-04, 1.10E-04, 1.05E-04, 1.00E-04,\n 9.50E-05, 9.00E-05, 7.00E-05, 5.00E-05, 1.90E-05, 1.00E-05,\n 1.00E-06,\n 1.00E-07,\n ],\n }\n\n models = []\n models.append(['gamma \\\\ C', 1.00E-12, 1.00E-11, 1.00E-10, 1.00E-09, 1.00E-08,\n 1.00E-07, 1.00E-06, 1.00E-05, 1.00E-04, 1.00E-03, 2.00E-03, 1.00E-02,\n 1.00E-01, 1.00, 1.00E+01, 1.00E+02, 1.00E+03, 1.00E+04, 1.00E+05 ])\n gridTimeStart = time()\n numIteration = 
len(parameters['gamma']) * len(parameters['C'])\n iteration = 0\n meanTime = 0\n for gamma in parameters['gamma']:\n row = [gamma]\n for C in parameters['C']:\n print('C = %s \\t gamma = %s'%(C, gamma))\n timeStart = time()\n svc = OneVsRestClassifier(SVC(random_state=0, decision_function_shape='ovr',\n C=C, kernel='rbf', gamma=gamma), n_jobs=4)\n svc.fit(xTrain, yTrain)\n yTrue, yPred = yTest, svc.predict(xTest)\n yTrue = np.array(yTrue, dtype=np.unicode_)\n yPred = np.array(yPred, dtype=np.unicode_)\n correct = np.sum(yTrue == yPred)\n \n print(\"accuracy: %d/%d = \"%(correct, len(yTrue)),\n D('%.2f'%(correct/len(yTrue)*100)))\n row.append(D('%.2f'%(correct/len(yTrue)*100)))\n \n iterTime = time()-timeStart\n iteration = iteration + 1\n meanTime = meanTime * (iteration-1)/iteration + iterTime/iteration\n remainingTime = (numIteration-iteration)*meanTime\n print('--------------------------(%d sec)--remaining: %s'%\n (iterTime, str(timedelta(seconds=int(remainingTime)))))\n models.append(row)\n gridTime = time() - gridTimeStart\n gridTime = timedelta(seconds=int(gridTime))\n print('time: %s'%str(gridTime))\n print('saving file: %s.csv'%filename)\n with open('../csv/%s.csv'%filename, 'w') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerows(models)", "def download_data(dev_mode: str, model: word2vec.Word2Vec) -> (np.ndarray, np.ndarray):\n assert dev_mode.lower() == 'false' or dev_mode.lower() == 'true'\n \n if dev_mode.lower() == 'false':\n print('Using Actual Data...')\n data_path = os.path.join(args.data_dir, 'HIV.csv')\n df = pd.read_csv(data_path)\n df['sentence'] = df.apply(lambda x: MolSentence(mol2alt_sentence(Chem.MolFromSmiles(x['smiles']), 1)), axis=1)\n df['mol2vec'] = [DfVec(x) for x in sentences2vec(df['sentence'], model, unseen='UNK')]\n \n # convert dataframe into numpy array for training\n X = np.array([x.vec for x in df['mol2vec']])\n y = np.array(df['HIV_active'].astype(int))\n else:\n # use example data set\n data_path = os.path.join(args.data_dir, 'ames.sdf')\n df = PandasTools.LoadSDF(data_path)\n df['sentence'] = df.apply(lambda x: MolSentence(mol2alt_sentence(x['ROMol'], 1)), axis=1)\n df['mol2vec'] = [DfVec(x) for x in sentences2vec(df['sentence'], model, unseen='UNK')]\n \n # convert dataframe into numpy array for training\n X = np.array([x.vec for x in df['mol2vec']])\n y = np.array(df['class'].astype(int))\n \n return X,y", "def test_downloadModel(self):\n\t\tmodel_in = \"\"\n\t\tquery_localdirs = cancerscope.get_models.findmodel(os.path.dirname(cancerscope.__file__), \"v1_rm500\")\n\t\tif query_localdirs is not None:\n\t\t\tmodel_in = query_localdirs[\"v1_rm500\"]\n\t\telse:\n\t\t\tmodel_in = cancerscope.get_models.downloadmodel(model_label=\"v1_rm500\")\n\t\t\n\t\tself.assertTrue(os.path.isdir(model_in))\n\t\tself.assertTrue(os.path.exists(\"\".join([model_in, \"/lasagne_bestparams.npz\"])))\n\t\t\n\t\t\"\"\"Test if model can be setup correctly\"\"\"\n\t\tlmodel = cancerscope.scopemodel(model_in)\n\t\tlmodel.fit()\n\t\n\t\tself.assertEqual(len(lmodel.features), 17688)", "def retrieve_additional_files(input_qchem, data_fchk, work_dir, scratch_read_level=0):\n\n additional_data = {}\n\n natom = len(input_qchem.molecule.get_coordinates())\n file_list = os.listdir(work_dir)\n\n # OLD_DIMENSIONS\n if '819.0' in file_list:\n with open(work_dir + '819.0', 'r') as f:\n data = np.fromfile(f, dtype=np.int32)\n norb_alpha, norb_beta = data[0:2]\n norb = norb_alpha\n nbas = norb # assumption\n else:\n norb = np.shape(data_fchk['coefficients']['alpha'])[0]\n nbas = 
np.shape(data_fchk['coefficients']['alpha'])[1]\n\n\n # MO_COEFS (Already in fchk) in internal order\n if '53.0' in file_list and 'coefficients' in data_fchk:\n with open(work_dir + '53.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n mo_alpha = data[:norb*nbas].reshape(-1, norb).tolist()\n mo_beta = data[norb*nbas: 2*norb_beta*nbas].reshape(-1, norb_beta).tolist()\n # additional_data['coefficients_internal'] = {'alpha': mo_alpha, 'beta': mo_beta}\n\n # obtain the order indices between fchk order and Q-Chem internal order of basis functions\n diff_square = get_sdm(data_fchk['coefficients']['alpha'], mo_alpha)\n\n # get non-repeating indices\n indices = []\n for row in diff_square.T:\n for i in np.argsort(row):\n if i not in indices:\n indices.append(int(i))\n break\n\n # indices = np.argmin(diff_square, axis=0).tolist()\n\n # store q-chem index order for later use (e.g guess)\n data_fchk['coefficients']['qchem_order'] = indices\n else:\n indices = list(range(nbas))\n\n # FOCK_MATRIX\n if '58.0' in file_list:\n with open(work_dir + '58.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n fock_alpha = data[:nbas*nbas].reshape(-1, nbas)\n fock_beta = data[nbas*nbas: 2*nbas*nbas].reshape(-1, nbas)\n\n # set basis functions in fchk order\n fock_alpha = fock_alpha[:, indices]\n fock_alpha = fock_alpha[indices, :]\n fock_beta = fock_beta[:, indices]\n fock_beta = fock_beta[indices, :]\n\n additional_data['fock_matrix'] = {'alpha': fock_alpha.tolist(), 'beta': fock_beta.tolist()}\n\n if scratch_read_level == -1:\n # FILE_ENERGY (Not really worth to read it)\n if '99.0' in file_list:\n with open(work_dir + '99.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n\n # FILE_DENSITY_MATRIX (Already in fchk)\n if '54.0' in file_list:\n with open(work_dir + '54.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n density_alpha = data[:nbas*nbas].reshape(-1, nbas)\n density_beta = data[nbas*nbas: 2*nbas*nbas].reshape(-1, nbas)\n # set basis functions in fchk order\n density_alpha = density_alpha[:, indices]\n density_alpha = density_alpha[indices, :]\n density_beta = density_beta[:, indices]\n density_beta = density_beta[indices, :]\n additional_data['scf_density_internal'] = {'alpha': density_alpha.tolist(), 'beta': density_beta.tolist()}\n\n # HESSIAN_MATRIX\n if '132.0' in file_list:\n with open(work_dir + '132.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n hessian = data.reshape(-1, natom*3)\n additional_data['hessian'] = hessian.tolist()\n\n # AO_INTS_DEBUG\n if '21.0' in file_list:\n with open(work_dir + '21.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n ao_integrals = data.reshape(-1, nbas, nbas, nbas)\n\n # set basis functions in fchk order\n ao_integrals = ao_integrals[:, :, :, indices]\n ao_integrals = ao_integrals[:, :, indices, :]\n ao_integrals = ao_integrals[:, indices, :, :]\n ao_integrals = ao_integrals[indices, :, :, :]\n\n additional_data['ao_integrals'] = ao_integrals.tolist()\n\n if scratch_read_level > 0:\n # FILE_RAS_AMP\n if '704.0' in file_list:\n with open(work_dir + '705.0', 'r') as f:\n ras_energies = np.fromfile(f, dtype=float)\n n_ras_roots = len(ras_energies)\n\n with open(work_dir + '704.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n ras_amplitudes = data.reshape(n_ras_roots, -1)\n additional_data['ras_amplitudes'] = ras_amplitudes.tolist()\n\n return additional_data", "def _load20news_miao():\n DIR = os.path.dirname(os.path.realpath(__file__)).split('vae_sparse')[0]+'vae_sparse/optvaedatasets'\n DIR += '/20news_miao'\n h5file = DIR+'/miao.h5'\n 
if not os.path.exists(h5file):\n flen = len(open(DIR+'/vocab').readlines())\n print 'DIM: ',flen\n np.random.seed(1)\n TRAIN_VALID_MAT = readSparseFile(DIR+'/train.feat', flen, zeroIndexed=False)\n idx = np.random.permutation(TRAIN_VALID_MAT.shape[0])\n VALIDMAT = TRAIN_VALID_MAT[idx[:500]]\n TRAINMAT = TRAIN_VALID_MAT[idx[500:]]\n TESTMAT = readSparseFile(DIR+'/test.feat', flen, zeroIndexed=False) \n saveSparseHDF5(TRAINMAT,'train', h5file)\n saveSparseHDF5(VALIDMAT,'valid', h5file)\n saveSparseHDF5(TESTMAT, 'test' , h5file)\n dset = {}\n dset['vocabulary']= [k.strip().split(' ')[0] for k in open(DIR+'/vocab').readlines()]\n dset['train'] = loadSparseHDF5('train',h5file)\n dset['valid'] = loadSparseHDF5('valid',h5file)\n dset['test'] = loadSparseHDF5('test',h5file)\n dset['dim_observations'] = dset['train'].shape[1]\n dset['data_type'] = 'bow'\n return dset", "def read(f):\n \n if isinstance(f, basestring):\n # If the input is a string, treat as file name\n with open(f) as fh: # Ensure file is closed\n return read(fh) # Call again with file object\n \n # First line contains the date\n date = f.readline()\n if not date:\n raise IOError(\"Cannot read from input file \"+str(filename))\n \n # Second is description\n desc = f.readline()\n \n token = file_numbers(f)\n \n # Third contains number of mesh points\n try:\n npsi = int(token.next())\n ntheta = int(token.next())\n isym = int(token.next())\n except StopIteration:\n raise IOError(\"Unexpected end of file while reading grid size\")\n except ValueError:\n raise IOError(\"Third line should contain npsi, ntheta and isym\")\n \n # Check values\n if (isym < 0) or (isym > 1):\n raise IOError(\"isym must be either 0 or 1\")\n if (npsi < 1) or (ntheta < 1):\n raise IOError(\"Invalid npsi=\"+str(npsi)+\" or ntheta=\" + str(ntheta))\n \n # Read normalisation factors\n\n try:\n rcnt = float(token.next())\n xma = float(token.next())\n zma = float(token.next())\n btor = float(token.next())\n curtot = float(token.next())\n eaxe = float(token.next())\n dnorm = float(token.next())\n except:\n raise IOError(\"Couldn't read normalisation factors\")\n \n def read_array(n, name=\"Unknown\"):\n data = np.zeros([n])\n try:\n for i in np.arange(n):\n data[i] = float(token.next())\n except:\n raise IOError(\"Failed reading array '\"+name+\"' of size \", n)\n return data\n\n def read_2d(nx, ny, name=\"Unknown\"):\n data = np.zeros([nx, ny])\n for i in np.arange(nx):\n data[i,:] = read_array(ny, name+\"[\"+str(i)+\"]\")\n return data\n\n # Read 1D arrays\n psiflux = read_array(npsi, \"psiflux\")\n fnorm = read_array(npsi, \"fnorm\")\n ffpnorm = read_array(npsi, \"ffpnorm\")\n ponly = read_array(npsi, \"ponly\")\n pponly = read_array(npsi, \"pponly\")\n qsf = read_array(npsi, \"qsf\")\n d = read_array(npsi, \"d\")\n \n dpdz = read_array(ntheta, \"dpdz\")\n dpdr = read_array(ntheta, \"dpdr\")\n \n # 2D arrays\n \n xnorm = read_2d(ntheta, npsi, \"xnorm\")\n znorm = read_2d(ntheta, npsi, \"znorm\")\n \n # Try to read Br and Bz (may be present)\n try:\n Br = read_2d(ntheta, npsi, \"Br\")\n Bz = read_2d(ntheta, npsi, \"Bz\")\n except:\n Br = Bz = None\n \n ny = ntheta\n\n if isym == 1:\n # Fill in values for up-down symmetric case\n print(\"Grid is up-down symmetric. 
Reflecting grid about midplane\")\n ny = tsize = 2*(ntheta - 1) + 1\n \n def reflect(data, mapfunc = lambda x:x):\n \"\"\" Reflect a variable about midplane\n Optionally supply a mapping function\"\"\"\n data2 = np.zeros([tsize, npsi])\n # Copy the original data\n for i in np.arange(ntheta):\n data2[i,:] = data[i,:]\n # Now fill in the remainder\n for i in np.arange(ntheta, tsize):\n t0 = tsize - 1 - i\n data2[i,:] = mapfunc(data[t0,:])\n return data2\n \n xnorm = reflect(xnorm)\n znorm = reflect(znorm, lambda x: 2.*zma - x) # Reflect about zma\n if Br != None:\n Br = reflect(Br, lambda x:-x) # Br reverses\n if Bz != None:\n Bz = reflect(Bz) # Bz remains the same\n theta = tsize\n\n # Make sure we have Br, Bz and Bpol\n\n if (Br == None) or (Bz == None):\n # Calculate Bpol from psi then Br and Bz from Bpol\n # Use dpsi = R*Bp dx (for now)\n Bpol = np.zeros([ny, npsi])\n \n def deriv(f):\n n = np.size(f)\n dfdi = np.zeros(n)\n dfdi[1:-1] = (f[2:n] - f[0:-2])/2. # Central difference in the middle\n dfdi[0] = f[1] - f[0]\n dfdi[-1] = f[-1] - f[-2]\n return dfdi\n \n for i in np.arange(ntheta):\n drdi = deriv(xnorm[i, :])\n dzdi = deriv(znorm[i, :])\n dldi = sqrt(drdi**2 + dzdi**2) # Arc length\n dpsidi = deriv(psiflux)\n \n Bpol[i, :] = dpsidi / (dldi * xnorm[i,:])\n else:\n Bpol = np.sqrt(Br**2 + Bz**2)\n \n # Calculate toroidal field\n Btor = fnorm / xnorm\n \n #########################################\n # Create a dictionary of values to return\n # \n # Need to transpose 2D arrays to [psi, theta] \n # to be consistent with elite inputs\n \n var = {\"npsi\":npsi, \"npol\":ny, # Sizes\n \n \"psi\":psiflux,\n \"f(psi)\":fnorm,\n \"p\":ponly,\n \n \"R\": np.transpose(xnorm),\n \"Z\": np.transpose(znorm),\n\n \"Bp\":np.transpose(Bpol),\n \"Bt\":np.transpose(Btor),\n\n \"q\":qsf,\n\n \"ffprime\":ffpnorm,\n \"pprime\":pponly}\n\n if Br != None:\n var['Br'] = np.transpose(Br)\n if Bz != None:\n var['Bz'] = np.transpose(Bz)\n \n return var" ]
[ "0.590859", "0.5844918", "0.5836278", "0.57506764", "0.5689922", "0.565109", "0.5614794", "0.55639184", "0.55183434", "0.5517771", "0.5503399", "0.5463083", "0.5459767", "0.54251224", "0.5416442", "0.5392664", "0.5355077", "0.53432804", "0.5338355", "0.53167695", "0.53060263", "0.5300313", "0.52990466", "0.5294528", "0.5285144", "0.52764696", "0.52449805", "0.5238722", "0.5230759", "0.5229293", "0.5220341", "0.5207541", "0.5202847", "0.5186163", "0.51727396", "0.5168217", "0.5168121", "0.51587564", "0.5157575", "0.5156601", "0.51506037", "0.5144636", "0.5136459", "0.51357245", "0.51289415", "0.5127496", "0.51256764", "0.51190746", "0.51188916", "0.5107517", "0.51070607", "0.5102604", "0.51008147", "0.50868785", "0.50836647", "0.50803185", "0.50785387", "0.5067348", "0.5062728", "0.50616777", "0.5061545", "0.5057816", "0.5057292", "0.50533336", "0.5027981", "0.5026874", "0.50145936", "0.5010863", "0.5010863", "0.50108397", "0.5010455", "0.50071555", "0.500521", "0.500267", "0.49928284", "0.49883023", "0.4988067", "0.49809754", "0.4980833", "0.49773696", "0.4972483", "0.4969894", "0.4969284", "0.49648198", "0.495959", "0.4957055", "0.49545744", "0.49528798", "0.49513024", "0.49508724", "0.4950774", "0.4946828", "0.49464223", "0.49444637", "0.49438277", "0.49368033", "0.49333045", "0.49276766", "0.49273193", "0.4913911", "0.49130055" ]
0.0
-1
Read the VQE convergence data for the mini BMN model from disk
def collect_data( optimizers: list, p: dict, ): # concatenate the results from all files frames = [read_data(o, p) for o in optimizers] return pd.concat(frames, keys=optimizers, names=["Optimizer"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_models():\n model_files_cvd = np.sort(glob.glob(\"./grad_results/cvd*N1024_f0003.npy\"))\n model_files_mnist = np.sort(glob.glob(\"./grad_results/mnist*N25000_f02.npy\"))\n\n model_files_cvd = np.array([model_files_cvd[2], model_files_cvd[1], model_files_cvd[0]])\n\n results_cvd = []\n results_mnist = []\n\n for filename in model_files_cvd:\n results_cvd.append(np.load(filename))\n \n for filename in model_files_mnist:\n results_mnist.append(np.load(filename))\n\n return np.array(results_mnist), np.array(results_cvd)", "def model_data():\n x_train, y_train, x_val, y_val, x_test, y_test = read_data(\"src/tests/dataclassificationmodel/ferPlus_processed.pbz2\", False)\n return x_train, y_train, x_val, y_val, x_test, y_test", "def readin():\r\n nodes = np.loadtxt('Vnodes.txt', ndmin=2)\r\n mats = np.loadtxt('Vmater.txt', ndmin=2)\r\n elements = np.loadtxt('Veles.txt', ndmin=2)\r\n loads = np.loadtxt('Vloads.txt', ndmin=2)\r\n return nodes, mats, elements, loads", "def read_dataset_v1():\n path = load_config()\n T = feather.read_dataframe(path['data_dir'] / 'T_dat.feather')\n E = feather.read_dataframe(path['data_dir'] / 'E_dat.feather')\n M = feather.read_dataframe(path['data_dir'] / 'Meta.feather')\n data = sio.loadmat(path['data_dir'] / 'highvar_genes.mat', squeeze_me=True)\n return T, E, M, data", "def read_dataset_v2():\n path = load_config()\n T = feather.read_dataframe(path['data_dir'] / 'T_dat_v2.feather')\n E = feather.read_dataframe(path['data_dir'] / 'E_dat_v2.feather')\n M = feather.read_dataframe(path['data_dir'] / 'Meta_v2.feather')\n data = sio.loadmat(path['data_dir'] / 'highvar_genes_v2.mat', squeeze_me=True)\n return T, E, M, data", "def load_vae_full(path, nb_of_bands, folder=False): \n latent_dim = 32\n \n # Build the encoder and decoder\n encoder, decoder = model.vae_model(latent_dim, nb_of_bands)\n\n # Build the model\n vae_loaded, vae_utils, Dkl = vae_functions.build_vanilla_vae(encoder, decoder, full_cov=False, coeff_KL = 0)\n\n if folder == False: \n vae_loaded.load_weights(path)\n else:\n print(path)\n latest = tf.train.latest_checkpoint(path)\n vae_loaded.load_weights(latest)\n\n return vae_loaded, vae_utils, encoder, decoder, Dkl", "def main(model_path='models/Nakakuki_Cell_2010_ODE'):\n n_file = []\n fitparam_files = os.listdir(model_path.strip('/') + '/fitparam')\n for file in fitparam_files:\n if re.match(r'\\d', file):\n n_file.append(int(file))\n for nth_paramset in n_file:\n os.makedirs(\n model_path.strip('/') \n + '/dat2npy/out/{:d}'.format(nth_paramset), exist_ok=True\n )\n nth_fitparam_files = os.listdir(\n model_path.strip('/') + '/fitparam/{:d}'.format(nth_paramset)\n )\n for dat_file in nth_fitparam_files:\n if 'fit' in dat_file:\n \"\"\"\n - fit_param%d.dat -> fit_param%d.npy\n - best_fitness.dat -> best_fitness.npy\n \"\"\"\n try:\n data = np.loadtxt(\n model_path.strip('/') + '/fitparam/{:d}/{}'.format(\n nth_paramset, dat_file\n ), dtype='float'\n )\n except ValueError:\n pass\n else:\n \"\"\"\n - count_num.dat -> count_num.npy\n - generation.dat -> generation.npy\n \"\"\"\n data = np.loadtxt(\n model_path.strip('/') + '/fitparam/{:d}/{}'.format(\n nth_paramset, dat_file\n ), dtype='int'\n )\n np.save(\n model_path.strip('/') + '/dat2npy/out/{:d}/'.format(nth_paramset)\n + dat_file.replace('.dat', '.npy'), data\n )\n if os.path.isfile(\n './logs/{:d}.log'.format(nth_paramset)):\n shutil.copyfile(\n './logs/{:d}.log'.format(nth_paramset),\n model_path.strip('/') \n + '/dat2npy/out/{:d}/optimization.log'.format(nth_paramset)\n )", "def 
load_data():\n\n dump_path = dump_base + '/micro_poi/mpoi_info/'\n\n assert os.path.exists(dump_path)\n\n dpath = dump_path + 'shortest_path.pickle'\n paths = joblib.load(dpath)\n\n dpath = dump_path + 'path_list.pickle'\n path_list = joblib.load(dpath)\n\n dpath = dump_path + 'gain.pickle'\n gain = joblib.load(dpath)\n\n dpath = dump_path + 'stay.pickle'\n stay_time = joblib.load(dpath)\n\n dpath = dump_path + 'reach.pickle'\n reach_time = joblib.load(dpath)\n\n spath = dump_base + '/micro_poi/model_params.list'\n model_params = np.loadtxt(spath)\n\n return np.array(paths), path_list, gain, stay_time, reach_time, model_params", "def load_back_from_disk(data_dir, istrain=True):\n \"\"\"load back metadata_df\"\"\"\n meta_data = pickle.load(open(os.path.join(data_dir, 'meta.pkl'), 'rb'))\n metadata_rows = meta_data[0]\n max_node = meta_data[1]\n\n \"\"\"itershard by loading from disk\"\"\"\n all_X, all_y, all_size, all_L, all_names, all_node_img = [], [], [], [], [], []\n\n for _, row in enumerate(metadata_rows):\n X = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['X'])))\n L = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['L'])))\n y = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['y'])))\n size = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['size'])))\n names = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['name'])))\n node_img = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['node_img'])))\n\n \"\"\" stack to list\"\"\"\n all_X.append(X)\n all_y.append(y)\n all_L.append(L)\n all_size.append(size)\n all_names.append(names)\n all_node_img.append(node_img)\n\n \"\"\" return a Dataset contains all X, y, w, ids\"\"\"\n all_X = np.squeeze(np.vstack(all_X))\n all_L = np.squeeze(np.vstack(all_L))\n all_y = np.squeeze(np.concatenate(all_y))\n all_size = np.squeeze(np.concatenate(all_size))\n all_names = np.squeeze(np.concatenate(all_names))\n all_node_img = np.squeeze(np.concatenate(all_node_img))\n\n # create output dataset\n dataset = dict()\n if istrain:\n dataset['X'] = all_X[:TRAIN_NUM]\n dataset['y'] = all_y[:TRAIN_NUM]\n dataset['size'] = all_size[:TRAIN_NUM]\n dataset['L'] = all_L[:TRAIN_NUM]\n dataset['name'] = all_names[:TRAIN_NUM]\n dataset['node_img'] = all_node_img[:TRAIN_NUM]\n else:\n dataset['X'] = all_X[:TEST_NUM]\n dataset['y'] = all_y[:TEST_NUM]\n dataset['size'] = all_size[:TEST_NUM]\n dataset['L'] = all_L[:TEST_NUM]\n dataset['name'] = all_names[:TEST_NUM]\n dataset['node_img'] = all_node_img[:TEST_NUM]\n\n return dataset, max_node", "def load_vae_conv(path,nb_of_bands,folder = False): \n latent_dim = 32\n \n # Build the encoder and decoder\n encoder, decoder = model.vae_model(latent_dim, nb_of_bands)\n\n # Build the model\n vae_loaded, vae_utils, Dkl = vae_functions.build_vanilla_vae(encoder, decoder, full_cov=False, coeff_KL = 0)\n\n if folder == False: \n vae_loaded.load_weights(path)\n else:\n latest = tf.train.latest_checkpoint(path)\n vae_loaded.load_weights(latest)\n\n return vae_loaded, vae_utils, encoder, Dkl", "def read_vmdas(self,):\n fd = self.f\n # The raw files produced by VMDAS contain a binary navigation data\n # block.\n self.cfg['sourceprog'] = 'VMDAS'\n ens = self.ensemble\n k = ens.k\n if self._source != 1 and self._debug_level >= 1:\n print(' \\n***** Apparently a VMDAS file \\n\\n')\n self._source = 1\n self.vars_read += ['time_gps',\n 'latitude_gps',\n 'longitude_gps',\n 'etime_gps',\n 'elatitude_gps',\n 'elongitude_gps',\n 'flags',\n 'ntime', ]\n utim = fd.read_ui8(4)\n 
date = tmlib.datetime(utim[2] + utim[3] * 256, utim[1], utim[0])\n # This byte is in hundredths of seconds (10s of milliseconds):\n time = tmlib.timedelta(milliseconds=(int(fd.read_ui32(1) / 10)))\n fd.seek(4, 1) # \"PC clock offset from UTC\" - clock drift in ms?\n ens.time_gps[k] = tmlib.date2epoch(date + time)[0]\n ens.latitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.longitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.etime_gps[k] = tmlib.date2epoch(date + tmlib.timedelta(\n milliseconds=int(fd.read_ui32(1) * 10)))[0]\n ens.elatitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.elongitude_gps[k] = fd.read_i32(1) * self._cfac\n fd.seek(12, 1)\n ens.flags[k] = fd.read_ui16(1)\n fd.seek(6, 1)\n utim = fd.read_ui8(4)\n date = tmlib.datetime(utim[0] + utim[1] * 256, utim[3], utim[2])\n ens.ntime[k] = tmlib.date2epoch(date + tmlib.timedelta(\n milliseconds=int(fd.read_ui32(1) / 10)))[0]\n fd.seek(16, 1)\n self._nbyte = 2 + 76", "def read_model(node_file, mater_file, els_file, load_file, verbose=True):\n # rea\n nodes = np.loadtxt(node_file)\n mats = np.loadtxt(mater_file)\n elements = np.loadtxt(els_file, dtype=int)\n loads = np.loadtxt(load_file)\n \n # Generate echo files\n if verbose:\n np.savetxt(\"KNODES.txt\", nodes, fmt='%5.2f', delimiter=' ')\n np.savetxt(\"KMATES.txt\", mats, fmt='%5.2f', delimiter=' ')\n np.savetxt(\"KELEMS.txt\", elements, fmt='%5.2f', delimiter=' ')\n np.savetxt(\"KLOADS.txt\", loads, fmt='%5.2f', delimiter=' ')\n \n return nodes, mats, elements, loads", "def read_glm_epochs(infile):\n with open(infile, 'rb') as outp:\n glmepec = pickle.load(outp)\n return glmepec", "def parse_BS_data(retrieved_folder, fermi_level, kpoints):\n # conversion factor from Ry to eV\n eVscale = get_Ry2eV()\n\n retrieved_list = retrieved_folder.list_object_names()\n qdos_file_list = [i for i in retrieved_list if 'qdos.' in i]\n q_vec_file = 'qvec.dat'\n\n if q_vec_file in retrieved_list:\n with retrieved_folder.open(q_vec_file) as file_opened:\n q_vec = np.loadtxt(file_opened, skiprows=1)\n\n for icount, fname in enumerate(qdos_file_list):\n with retrieved_folder.open(fname) as _f:\n loaded_file = np.loadtxt(_f)\n if icount == 0:\n total_qdos = loaded_file\n else:\n total_qdos[:, 5:] += loaded_file[:, 5:]\n\n ef = fermi_level.value # in Ry unit\n total_qdos[:, 0] = (total_qdos[:, 0] - ef) * eVscale\n eng_points = set(total_qdos[:, 0])\n eng_points = np.sort(list(eng_points))\n no_eng_points = len(eng_points)\n\n qdos_intensity = np.ndarray(shape=(no_eng_points, len(q_vec)))\n for ne in range(np.shape(qdos_intensity)[0]):\n nk = np.shape(qdos_intensity)[1]\n # sum up all l-channels (5 is only the s-channel!)\n qdos_intensity[ne, :] = np.sum(total_qdos[ne * nk:(ne + 1) * nk, 5:], axis=1) / eVscale\n\n qdos_intensity = qdos_intensity.T # setting eng-kpts corresponds to x-y asix\n q_vec = np.asarray(q_vec) # converting q_vec into array\n eng_points = (np.asarray(eng_points)) # converting eng_popints into array in Ry unit\n\n # To save into the ArrayData\n array = ArrayData()\n array.set_array('BlochSpectralFunction', qdos_intensity)\n array.set_array('Kpts', q_vec)\n array.set_array('energy_points', eng_points)\n if kpoints.labels is not None:\n klbl_dict = dict(kpoints.labels) # Special k-points\n array.extras['k-labels'] = klbl_dict\n\n return {'BS_Data': array}", "def load_data_from_disk(self):\n data = dict()\n Omega_M = self.theta_fid[0]\n der_den = 1. / (2. * self.delta_theta)\n\n print (\"Loading data from disk.. 
Omega_M = \", Omega_M, \"delta_theta = \", self.delta_theta[0])\n\n for key in ['x_central', 'x_m', 'x_p', 'x_central_test', 'x_m_test', 'x_p_test']:\n data[key] = np.load(f'./preloaded_data/{Omega_M}_{self.delta_theta[0]}_{key}.npy')\n\n return data, der_den", "def test_run_read(self):\n\n self.ictrl[0] = 1 + 2 + 4 + 8\n vmec_f90wrap.runvmec(self.ictrl, self.filename, self.verbose, \\\n self.fcomm, reset_file)\n\n self.assertTrue(self.ictrl[1] in success_codes)\n\n self.assertEqual(vmec_f90wrap.vmec_input.nfp, 3)\n self.assertEqual(vmec_f90wrap.vmec_input.mpol, 4)\n self.assertEqual(vmec_f90wrap.vmec_input.ntor, 3)\n print('rbc.shape:', vmec_f90wrap.vmec_input.rbc.shape)\n print('rbc:',vmec_f90wrap.vmec_input.rbc[101:103, 0:4])\n\n # n = 0, m = 0:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.rbc[101,0], 1.3782)\n\n # n = 0, m = 1:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.zbs[101,1], 4.6465E-01)\n\n # n = 1, m = 1:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.zbs[102,1], 1.6516E-01)\n\n # Now try reading in the output\n wout_file = os.path.join(os.path.dirname(__file__), 'wout_li383_low_res.nc')\n ierr = 0\n vmec_f90wrap.read_wout_mod.read_wout_file(wout_file, ierr)\n self.assertEqual(ierr, 0)\n self.assertAlmostEqual(vmec_f90wrap.read_wout_mod.betatot, \\\n 0.0426215030653306, places=4)\n\n print('iotaf.shape:',vmec_f90wrap.read_wout_mod.iotaf.shape)\n print('rmnc.shape:',vmec_f90wrap.read_wout_mod.rmnc.shape)\n\n self.assertAlmostEqual(vmec_f90wrap.read_wout_mod.iotaf[-1], \\\n 0.654868168783638, places=4)\n\n self.assertAlmostEqual(vmec_f90wrap.read_wout_mod.rmnc[0, 0], \\\n 1.4773028173065, places=4)", "def read_model(filename):\n return joblib.load(filename)", "def load_data(self):\n x_vector = pickle.load(open(self.file_stem + \"x.pickle\", \"rb\"))\n ode_sols = pickle.load(open(self.file_stem + \"sols.pickle\", \"rb\"))\n forcings = pickle.load(open(self.file_stem + \"fs.pickle\", \"rb\"))\n sl_coeffs = pickle.load(open(self.file_stem + \"coeffs.pickle\", \"rb\"))\n\n return x_vector, ode_sols, forcings, sl_coeffs", "def load_model_file(device_index):\n print(\"\\nStart loading model...\")\n\n return kdp_wrapper.isi_load_nef(device_index, MODEL_FILE, ISI_APP_ID)", "def test_readfile(self):\n fname = os.path.join(self.datadir, 'monol_testA_E3-50_rebin4_gti') + \\\n HEN_FILE_EXTENSION\n command = \"{0}\".format(fname)\n\n hen.io.main(command.split())", "def load_NMF_model():\n model = pickle.load(open(\"models/nmf_model.sav\", 'rb'))\n Q = model.components_ \n return model, Q", "def read_qmcpack_dense(filename):\n with h5py.File(filename, 'r') as fh5:\n enuc = fh5['Hamiltonian/Energies'][:][0]\n dims = fh5['Hamiltonian/dims'][:]\n hcore = fh5['Hamiltonian/hcore'][:]\n chol = fh5['Hamiltonian/DenseFactorized/L'][:]\n\n return hcore, chol, enuc", "def load_data():\n\n training_files_dir = \"digits/trainingDigits\"\n training_files = os.listdir(training_files_dir)\n file_num = len(training_files)\n hw_labels = []\n\n training_mat = zeros((file_num, 32 * 32))\n for i in xrange(file_num):\n filename = training_files[i]\n file_label = int((filename.split(\".\")[0]).split(\"_\")[0])\n hw_labels.append(file_label)\n training_mat[i, :] = img2vector(training_files_dir + '/' + filename)\n\n return training_mat, hw_labels", "def _read(self):\n # initializng data dictionary\n self.data={}\n\n f = FortranFile(self.filename)\n # Default omnivor binary header\n self.data['MK'] = f.readInts('i')\n self.data['itime'] = f.readInts('i')\n self.data['version'] = f.readString()\n 
self.data['file_id'] = f.readInts('i')\n self.data['sversion'] = f.readString()\n # Velocity field\n self.data['stype'] = f.readString()\n self.data['is_grid'] = f.readInts('i')\n nCPs = f.readInts('i')\n self.data['nCPs'] = nCPs\n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n #print('File is a velocity grid file')\n n1 = f.readInts('i')\n n2 = f.readInts('i')\n n3 = f.readInts('i')\n self.data['n1'] = n1\n self.data['n2'] = n2\n self.data['n3'] = n3\n self.data['is_straight'] = f.readInts('i')\n self.data['v1'] = f.readReals(real_char)\n self.data['v2'] = f.readReals(real_char)\n self.data['v3'] = f.readReals(real_char)\n\n CPs_raw = f.readReals(real_char)\n Utot_raw = f.readReals(real_char)\n CPs = np.reshape(CPs_raw,(3,nCPs),order = 'F')\n Utot = np.reshape(Utot_raw,(3,nCPs),order = 'F')\n\n acc=-1\n CPsTab = np.zeros((3, n1,n2,n3))\n UtotTab = np.zeros((3, n1,n2,n3))\n # Reshaping the nasty way (this is natural order). \n for i in range(0,n1):\n for j in range(0,n2):\n for k in range(0,n3):\n acc=acc+1\n CPsTab[0:3,i,j,k] = CPs[0:3,acc]\n UtotTab[0:3,i,j,k] = Utot[0:3,acc]\n\n self.data['CPs'] = CPs\n self.data['CPsTab'] = CPsTab\n self.data['Utot'] = Utot\n self.data['UtotTab'] = UtotTab", "def _read_csvs(self):\n self.data = pd.read_csv(self.path+self.name, index_col=0)", "def load_epsvec( fname ):\n try:\n E = numpy.loadtxt( fname )\n except IOError:\n prefix = '/data/jberwald/neurons/epsilons/'\n E = numpy.loadtxt( prefix + fname )\n return E", "def test_read_input(self):\n self.ictrl[0] = run_modes['input']\n vmec_f90wrap.runvmec(self.ictrl, self.filename, self.verbose, \\\n self.fcomm, reset_file)\n\n self.assertTrue(self.ictrl[1] in success_codes)\n\n self.assertEqual(vmec_f90wrap.vmec_input.nfp, 3)\n self.assertEqual(vmec_f90wrap.vmec_input.mpol, 4)\n self.assertEqual(vmec_f90wrap.vmec_input.ntor, 3)\n print('rbc.shape:', vmec_f90wrap.vmec_input.rbc.shape)\n print('rbc:',vmec_f90wrap.vmec_input.rbc[101:103, 0:4])\n\n # n = 0, m = 0:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.rbc[101,0], 1.3782)\n\n # n = 0, m = 1:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.zbs[101,1], 4.6465E-01)\n\n # n = 1, m = 1:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.zbs[102,1], 1.6516E-01)", "def get_examples(data_dir, mode, task_id, shard_id):\n file_path = get_full_filename(data_dir, mode, task_id, shard_id)\n relative_path = \"/\".join(file_path.split(\"/\")[3:])\n tf.logging.info(\"Reading file: %s\" % (file_path))\n print(relative_path)\n #client = storage.Client(projectname, credentials=credentials)\n #bucket = client.get_bucket(bucket_name)\n blob = storage_bucket.blob(relative_path)\n if not blob.exists():\n tf.logging.info(\"Path doesn't exist\")\n return None\n nq_data = extract_nq_data(file_path)\n tf.logging.info(\"NQ data Size: \" + str(len(nq_data.keys())))\n\n tf.logging.info(\"Performing entity extraction\")\n fact_extracted_data = entity_link_nq(nq_data)\n return fact_extracted_data", "def read_data():\n ADV_MAT = np.load('ADV.npy');\n ADJ_MAT = np.load('ADJ.npy');\n PR_MAT = np.load('PR.npy'); \n NN_MAT = np.load('NN.npy');\n for i in range(ADV_MAT.shape[0]):RUNNING_DATA['ADV___'+str(i)] = ADV_MAT[i];\n for i in range(ADJ_MAT.shape[0]):RUNNING_DATA['ADJ___'+str(i)] = ADJ_MAT[i];\n for i in range(PR_MAT.shape[0]):RUNNING_DATA['PR___'+str(i)] = PR_MAT[i];\n for i in range(NN_MAT.shape[0]):RUNNING_DATA['NN___'+str(i)] = NN_MAT[i];", "def load_model(file_index):\n normal, abnormal, all = read_in(file_index, 1, 2, 0.3)\n 
autoencoder = keras.models.load_model('Working_Data/ae_patient_' + str(file_index) + '_dim' + str(100) + '_model.h5')\n reconstructed = autoencoder.predict(all)\n reconstruction_save = \"Working_Data/reconstructed_cdae_10d_Idx\" + str(file_index) + \".npy\"\n np.save(reconstruction_save, reconstructed)", "def read_mesa(self, filename=None):\n\n if filename is None:\n filename = self.model_file\n\n with open(filename, 'r') as f:\n # count number of lines\n npts_file = sum([1 for line in f])\n\n # go back to start and read first line in file to get number of parameters\n f.seek(0)\n l = f.readline()\n nparams_file = int(l.split(' ')[-1])\n\n # skip lines 2-4\n for i in range(3):\n f.readline()\n\n # the fifth line will give us the number of variables\n l = f.readline()\n nvars_file = int(l.split(' ')[-1])\n\n # subtract header rows\n npts_file -= 6\n\n print(f'{nvars_file} variables found in the initial model file')\n print(f'{npts_file} points found in the initial model file')\n\n var_idx_map = {}\n logR_idx = -1\n\n # read in the names of the variables\n for i in range(nvars_file):\n var_name_file = f.readline().strip()\n if var_name_file.lower() == 'n':\n var_name_file = 'neut'\n elif var_name_file == 'p':\n var_name_file = 'prot'\n\n if var_name_file == 'logR':\n logR_idx = i\n continue\n\n # create map of file indices to model indices\n try:\n var_idx_map[self.idx[var_name_file]] = i\n except KeyError:\n var_idx_map[self.idx['spec'] - 1 + network_module.network_species_index(var_name_file.lower())] = i\n\n base_r = np.zeros(npts_file)\n base_state = np.zeros((npts_file, self.nvar))\n\n # read in model data\n for i, line in enumerate(f):\n variables = [float(v) for v in line.split(' ')]\n\n # need to reverse the inputs file here\n\n n = npts_file - i - 1\n\n base_r[n] = R_solar * 10**variables[logR_idx]\n\n for j in range(self.nvar):\n if j in var_idx_map:\n base_state[n, j] = variables[var_idx_map[j]]\n\n return npts_file, base_r, base_state", "def _fetch_large():\n # Large training data:\n resource(\n target=data_path(\"eeg\", \"SMNI_CMI_TRAIN.tar.gz\"),\n url=\"https://kdd.ics.uci.edu/databases/eeg/SMNI_CMI_TRAIN.tar.gz\",\n )\n dependency(\n target=data_path(\"eeg\", \"train\"),\n source=data_path(\"eeg\", \"SMNI_CMI_TRAIN.tar.gz\"),\n commands=[\n \"tar xzf SMNI_CMI_TRAIN.tar.gz\",\n \"mv SMNI_CMI_TRAIN train\",\n \"find train | grep gz$ | xargs gunzip\",\n ],\n )\n # Large test data:\n resource(\n target=data_path(\"eeg\", \"SMNI_CMI_TEST.tar.gz\"),\n url=\"https://kdd.ics.uci.edu/databases/eeg/SMNI_CMI_TEST.tar.gz\",\n )\n dependency(\n target=data_path(\"eeg\", \"test\"),\n source=data_path(\"eeg\", \"SMNI_CMI_TEST.tar.gz\"),\n commands=[\n \"tar xzf SMNI_CMI_TEST.tar.gz\",\n \"mv SMNI_CMI_TEST test\",\n \"find test | grep gz$ | xargs gunzip\",\n ],\n )", "def read_data(self):\n data = np.genfromtxt(self.__file) # Planck SED\n self.__nu = 10.0**data[:,0]\n self.__nuF = 10.0**data[:,2]\n self.__err = 10.0**data[:,3]\n #self.__W = 10.0**data[:,4]\n self.__yerr = [ self.__nuF - self.__nuF / self.__err, \\\n self.__nuF * self.__err - self.__nuF ]\n self.__maxY = max( self.__nuF )\n self.__minY = min( self.__nuF )", "def read_data(self):\n self.data = reduce_spectrum(self.filename)", "def load_vecs():\n global VECTORIZER\n global CECTORIZER\n\n v_file = os.path.join(TMP_DIR, 'vectorizer.pickle')\n d_file = os.path.join(TMP_DIR, 'dectorizer.pickle')\n f_file = os.path.join(TMP_DIR, 'freq.pickle')\n\n if os.path.isfile(v_file) and os.path.isfile(d_file):\n with open(v_file, 'rb') as 
f:\n VECTORIZER = pickle.load(f)\n with open(d_file, 'rb') as f:\n CECTORIZER = pickle.load(f)\n return True\n\n return False", "def read_data(data_path):\n tr = data_path + 'train_vectors.txt'\n v = data_path + 'val_vectors.txt'\n tst = data_path + 'test_vectors.txt'\n return tr, v, tst", "def loader(filename,wdm=0,verbose=0,kmpers=1):\n with open(filename, 'rb') as f:\n if wdm == False:\n if verbose>1:\n print(filename)\n #file info\n info= np.fromfile(f,dtype=infodtype,count=1)\n infoBytes = f.tell()\n if verbose>2:\n print(infoBytes)\n #skip darkmatter\n #read the first dm line\n if verbose>2:\n print(f.tell())\n catd = np.fromfile(f,dtype= dmdtype, count=1) \n #get the bytes location and subtract off the bytes location after loading info to get n bytes a line for dm\n if verbose>2:\n print(f.tell())\n current = f.tell()\n dmBytes = current-infoBytes\n f.seek(dmBytes*(info['nd'][0]-1)+current)\n if verbose>2:\n print(f.tell())\n # stars setup \n cats= np.fromfile(f,dtype=stellardtype, count=info['ns'][0])\n if verbose>2:\n print('done')\n else:\n if verbose>1:\n print(filename)\n #file info\n info= np.fromfile(f,dtype=infodtype,count=1)\n if verbose>2:\n print(f.tell())\n # #dark matter setup count is reading the number of ?rows? \n catd= np.fromfile(f,dmdtype, count=info['nd'][0]) \n if verbose>2:\n print(f.tell()) \n # stars setup \n cats= np.fromfile(f,dtype=stellardtype, count=info['ns'][0])\n if verbose>2:\n print('done')\n \n \n #convert to physical units as found in README.md\n if wdm == True:\n catd['mass']*=2.324876e9\n if kmpers == 1:\n catd['vx']*=100.\n catd['vy']*=100.\n catd['vz']*=100.\n cats['mass']*=2.324876e9\n if kmpers == 1:\n cats['vx']*=100.\n cats['vy']*=100.\n cats['vz']*=100.\n \n if wdm == True:\n return(catd,cats,info)\n else:\n return(cats,info)", "def read_wikibrain_vecs(path):\n matrix = []\n with open(path, \"r\") as vecs:\n vecs.readline()\n for line in vecs:\n matrix.append(map(float, line.rstrip(\"\\n\").split(\"\\t\")))\n return matrix", "def get_data():\r\n if not path_validation(MODEL_PATH, read_access=True):\r\n exit(0) \r\n if not path_validation(TEST_DATA_PATH, read_access=True):\r\n exit(0) \r\n if not path_validation(TEST_LABEL_PATH, read_access=True):\r\n exit(0) \r\n\r\n params = joblib.load(MODEL_PATH)\r\n test_images = np.load(TEST_DATA_PATH)\r\n test_labels = np.load(TEST_LABEL_PATH)\r\n\r\n # Addition of bias in test set\r\n test_images = np.insert(test_images, 0, 1, axis=1)\r\n\r\n return params, test_images, test_labels", "def get_memes_data(path):\n data = load_files(path)\n memes = np.array(data['filenames'])\n return memes", "def readmodel(model = 'dominguez'):\n ebl_file_path = os.path.join(os.path.split(__file__)[0],'data/')\n\n if model == 'kneiske':\n file_name = join(ebl_file_path , 'ebl_nuFnu_tanja.dat')\n elif model == 'franceschini':\n file_name = join(ebl_file_path , 'ebl_franceschini.dat')\n elif model == 'dominguez':\n file_name = join(ebl_file_path , 'ebl_dominguez11.out')\n elif model == 'dominguez-upper':\n file_name = join(ebl_file_path , 'ebl_upper_uncertainties_dominguez11.out')\n elif model == 'dominguez-lower':\n file_name = join(ebl_file_path , 'ebl_lower_uncertainties_dominguez11.out')\n elif model == 'inoue':\n file_name = join(ebl_file_path , 'EBL_z_0_baseline.dat')\n #file_name = join(ebl_file_path , 'EBL_proper_baseline.dat')\n elif model == 'inoue-low-pop3':\n file_name = join(ebl_file_path , 'EBL_z_0_low_pop3.dat')\n #file_name = join(ebl_file_path , 'EBL_proper_low_pop3.dat')\n elif model == 
'inoue-up-pop3':\n file_name = join(ebl_file_path , 'EBL_z_0_up_pop3.dat')\n #file_name = join(ebl_file_path , 'EBL_proper_up_pop3.dat')\n elif model == 'gilmore':\n file_name = join(ebl_file_path , 'eblflux_fiducial.dat')\n elif model == 'gilmore-fixed':\n file_name = join(ebl_file_path , 'eblflux_fixed.dat')\n elif model == 'cuba':\n file_name = join(ebl_file_path , 'CUBA_UVB.dat')\n elif model == 'finke':\n file_name = join(ebl_file_path , 'ebl_modelC_Finke.txt')\n else:\n raise ValueError(\"Unknown EBL model chosen!\")\n\n data = np.loadtxt(file_name)\n if model.find('inoue') >= 0:\n z = np.array([0.])\n #z = data[0,1:]\n #nuInu = data[:,1]\n lmu = data[:,0]\n nuInu = np.array([data[:,1]]).T\n raise ValueError('Inoue models not correctly implemented at the moment, choose another model')\n\n elif model.find('gilmore') >= 0:\n z = data[0,1:]\n lmu = data[1:,0] * 1e-4 # convert from Angstrom to micro meter\n nuInu = data[1:,1:] \n nuInu[nuInu == 0.] = 1e-20 * np.ones(np.sum(nuInu == 0.))\n \n # convert from ergs/s/cm^2/Ang/sr to nW/m^2/sr\n nuInu = (nuInu.T * data[1:,0]).T * 1e4 * 1e-7 * 1e9 \n\n elif model == 'cuba':\n z = data[0,1:-1]\n lmu = data[1:,0] * 1e-4\n nuInu = data[1:,1:-1]\n\n # replace zeros by 1e-40\n idx = np.where(data[1:,1:-1] == 0.)\n nuInu[idx] = np.ones(np.sum(nuInu == 0.)) * 1e-20\n\n # in erg / cm^2 / s / sr\n nuInu = (nuInu.T * c.c.value / (lmu * 1e-6)).T \n nuInu *= 1e6 # in nW / m^2 / sr\n\n # check where lmu is not strictly increasing\n idx = np.where(np.diff(lmu) == 0.)\n for i in idx[0]:\n lmu[i+1] = (lmu[i + 2] + lmu[i]) / 2.\n\n else:\n z = data[0,1:]\n lmu = data[1:,0]\n nuInu = data[1:,1:]\n if model == 'finke': \n lmu = lmu[::-1] * 1e-4\n nuInu = nuInu[::-1]\n\n return EBL(z,lmu,nuInu, model = model)", "def load_data_from_fold(data_path):\r\n print(\"\\nLoading data from json folder {}\".format(data_path))\r\n\r\n SAMPLES_TO_CONSIDER = 22050\r\n\r\n data = preprocess_dataset(data_path, SAMPLES_TO_CONSIDER)\r\n\r\n X = np.array(data[\"MFCCs\"])\r\n y = np.array(data[\"labels\"])\r\n print(\"Training sets loaded!\")\r\n print(\"data size :\", X.shape, \"labels size: \", y.shape)\r\n print(\"release the 'data' for memories\")\r\n del data\r\n\r\n return X, y", "def read_szf_fmv_13(eps_file):\n data = {}\n metadata = {}\n\n n_lines = eps_file.mdr_counter\n n_node_per_line = eps_file.mdr[\"LONGITUDE_FULL\"].shape[1]\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n # extract metadata\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n metadata[\"state_vector_time\"] = datetime.strptime(\n eps_file.mphr[\"STATE_VECTOR_TIME\"][:-4], \"%Y%m%d%H%M%S\")\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n for f in fields:\n metadata[f] = np.int16(eps_file.mphr[f.upper()])\n\n # extract time\n dt = np.datetime64(\n \"2000-01-01\") + eps_file.mdr[\"UTC_LOCALISATION\"][\"day\"].astype(\n \"timedelta64[D]\"\n ) + eps_file.mdr[\"UTC_LOCALISATION\"][\"time\"].astype(\"timedelta64[ms]\")\n data[\"time\"] = dt[idx_nodes]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\", \"flagfield_rf1\", \"flagfield_rf2\", \"flagfield_pl\",\n \"flagfield_gen1\"\n ]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\"\n ]\n\n # 101 min = 6082 seconds\n # state_vector_time = ascending node crossing time - 
1520.5,\n # time crossing at -90 lat\n orbit_start_time = metadata[\"state_vector_time\"] - timedelta(\n seconds=1520.5)\n orbit_end_time = orbit_start_time + timedelta(seconds=6082)\n\n data[\"orbit_nr\"] = np.ma.zeros(\n data[\"time\"].size, dtype=np.int32,\n fill_value=int32_nan) + metadata[\"orbit_start\"]\n data[\"orbit_nr\"][data[\"time\"] > orbit_end_time] += 1\n\n metadata[\"orbits\"] = {}\n for orbit_nr in np.unique(data[\"orbit_nr\"]):\n if orbit_nr == metadata[\"orbit_start\"]:\n metadata[\"orbits\"][orbit_nr] = (orbit_start_time, orbit_end_time)\n else:\n metadata[\"orbits\"][orbit_nr] = (orbit_end_time, orbit_end_time +\n timedelta(seconds=6082))\n\n # extract data\n for f in fields:\n if eps_file.mdr_sfactor[f.upper()] == 1:\n data[f] = eps_file.mdr[f.upper()].flatten()[idx_nodes]\n else:\n data[f] = (eps_file.mdr[f.upper()].flatten() * 1. /\n eps_file.mdr_sfactor[f.upper()])[idx_nodes]\n\n data[\"swath_indicator\"] = (data[\"beam_number\"].flatten() > 3).astype(\n np.uint8)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n fields = [(\"longitude_full\", long_nan), (\"latitude_full\", long_nan),\n (\"sigma0_full\", long_nan), (\"inc_angle_full\", uint_nan),\n (\"azi_angle_full\", int_nan), (\"flagfield\", int_nan)]\n\n for f, nan_val in fields:\n data[f] = eps_file.mdr[f.upper()].flatten()\n invalid = eps_file.mdr[f.upper()].flatten() == nan_val\n\n if eps_file.mdr_sfactor[f.upper()] != 1:\n data[f] = data[f] * 1. / eps_file.mdr_sfactor[f.upper()]\n\n data[f][invalid] = nan_val\n\n # modify longitudes from (0, 360) to (-180, 180)\n mask = np.logical_and(data[\"longitude_full\"] != long_nan,\n data[\"longitude_full\"] > 180)\n data[\"longitude_full\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n idx = (data[\"azi_angle_full\"] != int_nan) & (data[\"azi_angle_full\"] < 0)\n data[\"azi_angle_full\"][idx] += 360\n\n # set flags\n data[\"f_usable\"] = set_flags_fmv13(data[\"flagfield\"])\n\n return data, metadata", "def read_files(data_loc, vector_size, window, epochs, min_df, max_df):\n\n class Data: pass\n\n speech = Data()\n\n print(\"-- train data\")\n speech.train_data, speech.train_fnames, speech.train_labels = read_tsv(data_loc, \"train.tsv\")\n print(len(speech.train_data))\n\n print(\"-- dev data\")\n speech.dev_data, speech.dev_fnames, speech.dev_labels = read_tsv(data_loc, \"dev.tsv\")\n print(len(speech.dev_data))\n\n print(\"-- test data\")\n test_data, test_fnames = read_unlabeled(data_loc, 'test')\n\n # print(\"-- unlabeled data\")\n # unlabeled_data, unlabeled_fnames = read_unlabeled(data_loc, 'unlabeled')\n # print(len(unlabeled_fnames))\n\n print(\"-- transforming data and labels\")\n speech.test_fnames = test_fnames\n\n train_docs = []\n train_analyzedDocument = namedtuple('AnalyzedDocument', 'words tags')\n for i, document in enumerate(speech.train_data):\n words = document\n tags = [\"train_\" + str(i)]\n train_docs.append(train_analyzedDocument(words, tags))\n dev_docs = []\n dev_analyzedDocument = namedtuple('AnalyzedDocument', 'words tags')\n for i, document in enumerate(speech.dev_data):\n words = document\n tags = [\"dev_\" + str(i)]\n dev_docs.append(dev_analyzedDocument(words, tags))\n test_docs = []\n test_analyzedDocument = namedtuple('AnalyzedDocument', 'words tags')\n for i, document in enumerate(test_data):\n words = document\n tags = [\"test_\" + str(i)]\n test_docs.append(test_analyzedDocument(words, tags))\n docs = train_docs + dev_docs + test_docs\n model = Doc2Vec(vector_size=vector_size, 
window=window, min_count=1, workers=8, epochs=epochs, negative=5, sample=1e-4)\n model.build_vocab(docs)\n model.train(docs, total_examples=model.corpus_count, epochs=model.epochs)\n print(model.most_similar('북한'))\n speech.train_doc_vec = np.zeros((4120, vector_size))\n speech.dev_doc_vec = np.zeros((4120, vector_size))\n speech.test_doc_vec = np.zeros((4120, vector_size))\n\n for i in range(4120):\n prefix_train = 'train_' + str(i)\n speech.train_doc_vec[i] = model[prefix_train]\n for i in range(4120):\n prefix_train = 'dev_' + str(i)\n speech.dev_doc_vec[i] = model[prefix_train]\n for i in range(4120):\n prefix_test = 'test_' + str(i)\n speech.test_doc_vec[i] = model[prefix_test]\n from sklearn import preprocessing\n speech.le = preprocessing.LabelEncoder()\n speech.le.fit(speech.train_labels)\n speech.target_labels = speech.le.classes_\n speech.trainy = speech.le.transform(speech.train_labels)\n speech.devy = speech.le.transform(speech.dev_labels)\n return speech", "def read_PSSM_data(self):\n\n names = os.listdir(self.pssm_path)\n fname = [n for n in names if n.find(self.molname)==0]\n\n if len(fname)>1:\n raise ValueError('Multiple PSSM files found for %s in %s',self.mol_name,self.pssm_path)\n if len(fname)==0:\n raise FileNotFoundError('No PSSM file found for %s in %s',self.mol_name,self.pssm_path)\n else:\n fname = fname[0]\n\n f = open(self.pssm_path + '/' + fname,'rb')\n data = f.readlines()\n f.close()\n raw_data = list( map(lambda x: x.decode('utf-8').split(),data))\n\n self.res_data = np.array(raw_data)[:,:3]\n self.res_data = [ (r[0],int(r[1]),r[2]) for r in self.res_data ]\n self.pssm_data = np.array(raw_data)[:,3:].astype(np.float)", "def read_data(path, batch_size, qp, frac, kernel, model):\n # load h5 file and get dictionaries\n inputs_dict, labels_dict, _ = get_dataset_dict(path, qp)\n\n # create training / validation dictionaries\n block_keys = [k for k in inputs_dict]\n train_inputs_dict, train_labels_dict, val_inputs_dict, val_labels_dict = (dict() for _ in range(4))\n\n # get inputs / labels for block & frac position\n for block in block_keys:\n inputs = inputs_dict[block][frac]\n\n # only use inputs that can be split 80 / 20 train / validation and fill out a batch\n split_percentage = 4/5\n if len(inputs) < batch_size / split_percentage:\n continue\n\n # if model contains non-linear activations, use same input & label size\n inputs = inputs[:, kernel:-kernel, kernel:-kernel, :] if \"scratch\" not in model else inputs\n\n labels = labels_dict[block][frac]\n\n # shuffle the pairs\n inputs, labels = array_shuffle(len(inputs), inputs, labels)\n\n # split 80 / 20\n (train_inputs, train_labels), (val_inputs, val_labels) = split_data(split_percentage, inputs, labels)\n\n # put into correct dictionary entry\n train_inputs_dict[block] = train_inputs\n train_labels_dict[block] = train_labels\n val_inputs_dict[block] = val_inputs\n val_labels_dict[block] = val_labels\n\n return train_inputs_dict, train_labels_dict, val_inputs_dict, val_labels_dict", "def read_data_set():\n # shapes of datasets -- [] means expanded form:\n # - X: J\n # - net.R: J [x J x 1]\n # - F_DIST: J x J x num_features\n # - F_DIST_w1: J x J x num_features\n # - w['except_first'][-1]: (last weights) J x num_features [x 1]\n # - w['except_first'][1:-1]: (second to last weights) J x J x num_features\n # - first weights **were** also J x J x num_features\n # - w['first_for_r']: J x 1 x num_features\n\n read_X()\n read_weights(read_FDIST())", "def load_data(m=5000, n=100, 
path='D:/file/vscode/py/data/mnist.npz'):\r\n f = np.load(path)\r\n x_train, y_train = f['x_train'], f['y_train']\r\n\r\n x_test, y_test = f['x_test'], f['y_test']\r\n\r\n f.close()\r\n return (x_train, y_train), (x_test, y_test)", "def test_brainvision_data():\n assert_raises(IOError, read_raw_brainvision, vmrk_path)\n assert_raises(ValueError, read_raw_brainvision, vhdr_path, montage,\n preload=True, scale=\"foo\")\n with warnings.catch_warnings(record=True) as w: # event parsing\n raw_py = _test_raw_reader(\n read_raw_brainvision, vhdr_fname=vhdr_path, montage=montage,\n eog=eog)\n assert_true(all('parse triggers that' in str(ww.message) for ww in w))\n assert_true('RawBrainVision' in repr(raw_py))\n\n assert_equal(raw_py.info['highpass'], 0.)\n assert_equal(raw_py.info['lowpass'], 250.)\n\n picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')\n data_py, times_py = raw_py[picks]\n\n # compare with a file that was generated using MNE-C\n raw_bin = Raw(eeg_bin, preload=True)\n picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')\n data_bin, times_bin = raw_bin[picks]\n\n assert_array_almost_equal(data_py, data_bin)\n assert_array_almost_equal(times_py, times_bin)\n\n # Make sure EOG channels are marked correctly\n for ch in raw_py.info['chs']:\n if ch['ch_name'] in eog:\n assert_equal(ch['kind'], FIFF.FIFFV_EOG_CH)\n elif ch['ch_name'] == 'STI 014':\n assert_equal(ch['kind'], FIFF.FIFFV_STIM_CH)\n elif ch['ch_name'] in raw_py.info['ch_names']:\n assert_equal(ch['kind'], FIFF.FIFFV_EEG_CH)\n else:\n raise RuntimeError(\"Unknown Channel: %s\" % ch['ch_name'])\n\n # test loading v2\n read_raw_brainvision(vhdr_v2_path, eog=eog, preload=True,\n response_trig_shift=1000)", "def readDataFromFile():\n image_size = 28 # each image is 28x28\n\n num_images = 60000 # there are 60k images\n with gzip.open(r'train-images-idx3-ubyte.gz', 'r') as f: # 60k train & valid\n f.read(16) # reading by 16-byte double\n buffer_Train_Images = f.read(image_size * image_size * num_images)\n f.close()\n data_Train_Images = np.frombuffer(buffer_Train_Images, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n data_Train_Images = data_Train_Images.reshape(num_images,\n image_size * image_size) # Data = 60k x 28 x 28 with 1 value in it\n\n with gzip.open('train-labels-idx1-ubyte.gz', 'r') as f: # 60k train & valid - labels\n f.read(8) # reading by 16-byte double\n buffer_Train_Labels = f.read(num_images)\n data_Train_Labels = np.frombuffer(buffer_Train_Labels, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n\n num_images = 10000 # there are 10k images\n with gzip.open('t10k-images-idx3-ubyte.gz', 'r') as f: # 10k tests\n f.read(16) # reading by 16-byte double\n buffer_Test_Image = f.read(image_size * image_size * num_images)\n data_Test_Image = np.frombuffer(buffer_Test_Image, dtype=np.uint8).astype(\n np.uint8) # translating into 0 to 255\n data_Test_Image = data_Test_Image.reshape(num_images, image_size * image_size) # Data = 60k x 28 x 28 with\n\n with gzip.open('t10k-labels-idx1-ubyte.gz', 'r') as f: # 10k tests - lbles\n f.read(8) # reading by 16-byte double\n buffer_Test_Label = f.read(num_images)\n data_Test_Labels = np.frombuffer(buffer_Test_Label, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n\n return data_Train_Images, data_Train_Labels, data_Test_Image, data_Test_Labels", "def readEEGepoch(eegfilename, mainDir):\n # subject = 'ES9007' \n datapath = os.path.join(mainDir)\n os.chdir(datapath)\n \n folders = os.listdir(datapath)\n 
\n for dir in folders:\n \n os.chdir(os.path.join(datapath, dir))\n file = glob.glob(eegfilename)\n \n if file:\n print('>>>>>>>>>>>>> file loaded from >>>>>>>>>>>>>>>>>:', os.getcwd())\n filepath = os.path.join(os.getcwd(), eegfilename) \n dat = mne.read_epochs(filepath, preload=True) \n break \n return dat", "def read_model(input_file):\n with open(input_file) as inp:\n labels = inp.readline().strip().split(\" \")\n init_conc = np.array(list(map(float, inp.readline().strip().split(\" \"))))\n\n stoich = []\n for i in range(len(labels)):\n stoich.append(list(map(float, inp.readline().strip().split(\" \"))))\n S_matrix = np.array(stoich)\n\n educt = []\n for i in range(len(labels)):\n educt.append(list(map(float, inp.readline().strip().split(\" \"))))\n educt_matrix = np.array(educt)\n\n kin_par = np.array(list(map(float, inp.readline().strip().split(\" \"))))\n t_T, t_eval_step = list(map(float, inp.readline().strip().split(\" \")))\n\n return labels, init_conc, S_matrix, educt_matrix, kin_par, t_T, t_eval_step", "def _read_vee(filename):\n with open(filename) as f:\n lines = f.readlines()\n for line in lines:\n line = line.strip().split()\n if len(line) <= 2:\n size1, size2 = int(line[0]), int(line[1])\n vee = NP.zeros((size1, size1, size2, size2), dtype=NP.float64)\n elif len(line) == 5:\n mu, nu, lmda, sgma, val = int(line[0]) - 1, int(line[1]) - 1, int(line[2]) - 1, int(line[3]) - 1, NP.float64(line[4])\n vee[mu,nu,lmda,sgma] = \\\n vee[nu,mu,lmda,sgma] = \\\n vee[mu,nu,sgma,lmda] = \\\n vee[nu,mu,sgma,lmda] = \\\n vee[lmda,sgma,mu,nu] = \\\n vee[sgma,lmda,mu,nu] = \\\n vee[lmda,sgma,nu,mu] = \\\n vee[sgma,lmda,nu,mu] = \\\n val\n return vee", "def read_szf_fmv_12(eps_file):\n data = {}\n metadata = {}\n\n n_lines = eps_file.mdr_counter\n n_node_per_line = eps_file.mdr[\"LONGITUDE_FULL\"].shape[1]\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n # extract metadata\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n metadata[\"state_vector_time\"] = datetime.strptime(\n eps_file.mphr[\"STATE_VECTOR_TIME\"][:-4], \"%Y%m%d%H%M%S\")\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n for f in fields:\n metadata[f] = np.int16(eps_file.mphr[f.upper()])\n\n # extract time\n dt = np.datetime64(\n \"2000-01-01\") + eps_file.mdr[\"UTC_LOCALISATION\"][\"day\"].astype(\n \"timedelta64[D]\"\n ) + eps_file.mdr[\"UTC_LOCALISATION\"][\"time\"].astype(\"timedelta64[ms]\")\n data[\"time\"] = dt[idx_nodes]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\", \"flagfield_rf1\", \"flagfield_rf2\", \"flagfield_pl\",\n \"flagfield_gen1\"\n ]\n\n # 101 min = 6082 seconds\n # state_vector_time = ascending node crossing time - 1520.5,\n # time crossing at -90 lat\n orbit_start_time = metadata[\"state_vector_time\"] - timedelta(\n seconds=1520.5)\n orbit_end_time = orbit_start_time + timedelta(seconds=6082)\n\n data[\"orbit_nr\"] = np.ma.zeros(\n data[\"time\"].size, dtype=np.int32,\n fill_value=int32_nan) + metadata[\"orbit_start\"]\n data[\"orbit_nr\"][data[\"time\"] > orbit_end_time] += 1\n\n metadata[\"orbits\"] = {}\n for orbit_nr in np.unique(data[\"orbit_nr\"]):\n if orbit_nr == metadata[\"orbit_start\"]:\n metadata[\"orbits\"][orbit_nr] = (orbit_start_time, orbit_end_time)\n else:\n metadata[\"orbits\"][orbit_nr] = (orbit_end_time, orbit_end_time +\n 
timedelta(seconds=6082))\n\n # extract data\n for f in fields:\n if eps_file.mdr_sfactor[f.upper()] == 1:\n data[f] = eps_file.mdr[f.upper()].flatten()[idx_nodes]\n else:\n data[f] = (eps_file.mdr[f.upper()].flatten() * 1. /\n eps_file.mdr_sfactor[f.upper()])[idx_nodes]\n\n data[\"swath_indicator\"] = (data[\"beam_number\"].flatten() > 3).astype(\n np.uint8)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n fields = [(\"longitude_full\", long_nan), (\"latitude_full\", long_nan),\n (\"sigma0_full\", long_nan), (\"inc_angle_full\", uint_nan),\n (\"azi_angle_full\", int_nan), (\"land_frac\", uint_nan),\n (\"flagfield_gen2\", byte_nan)]\n\n for f, nan_val in fields:\n data[f] = eps_file.mdr[f.upper()].flatten()\n invalid = eps_file.mdr[f.upper()].flatten() == nan_val\n\n if eps_file.mdr_sfactor[f.upper()] != 1:\n data[f] = data[f] * 1. / eps_file.mdr_sfactor[f.upper()]\n\n data[f][invalid] = nan_val\n\n # modify longitudes from (0, 360) to (-180, 180)\n mask = np.logical_and(data[\"longitude_full\"] != long_nan,\n data[\"longitude_full\"] > 180)\n data[\"longitude_full\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n idx = (data[\"azi_angle_full\"] != int_nan) & (data[\"azi_angle_full\"] < 0)\n data[\"azi_angle_full\"][idx] += 360\n\n # set flags\n data[\"f_usable\"] = set_flags(data)\n\n return data, metadata", "def read(self) :\n # Open the file.\n f = open(self.output, 'r')\n lines = f.readlines()\n \n # Find the eigenvalue.\n count = 0\n while True :\n words = lines[count].split()\n if len(words) == 5 :\n if words[0] == \"*\" and words[1] == \"K-EFF\":\n self.keff = float(words[3])\n break\n count += 1\n \n # Find the peaking.\n a = 0 # Assembly index\n \n while True :\n words = lines[count].split()\n if len(words) == 8 :\n if words[0] == \"NODE\" and words[1] == \"AVERAGE\" and words[2] == \"POWERS\" :\n count += 5 # Powers start 5 lines below title\n for row in range(0, self.dimension) :\n words = lines[count].split()\n assert(len(words) >= self.dimension)\n for col in range(0, self.dimension) :\n self.peaking_map[row, col] = float(words[col+1])\n if self.core.stencil[row, col] > 0:\n #print \" a=\", a, \" row=\", row, \" col=\", col, len(self.peaking)\n self.peaking[a] = self.peaking_map[row, col]\n a += 1\n count += 1\n break\n count += 1 \n # Maximum peaking.\n self.maxpeak = np.max(self.peaking)", "def load_cleaned_data(self):\n try:\n self.train = pd.read_pickle('../input/train_clean.pkl')\n self.test = pd.read_pickle('../input/test_clean.pkl')\n except FileNotFoundError:\n self.load_raw_data()", "def __init__(self, qmm, config, update_tof, rr_qe, path, meas_len, smearing, lsb=False):\n\n self.qmm = qmm\n self.config = config\n self.rr_qe = rr_qe\n self.num_of_states = 3\n self.path = path\n self.saved_data = None\n self.time_diff = None\n self.update_tof = update_tof\n self.finish_train = 0\n self.mu = dict()\n self.sigma = dict()\n self._load_file(path)\n self.lsb = lsb\n self.meas_len = meas_len\n self.smearing = smearing", "def load_breeze(self, breeze_path):\n self.breeze = pd.read_pickle(os.path.join(self.data_path, 'breeze.pick'))", "def load_data():\n\n # Load data\n # You can create this Numpy datafile by running the create_validation_sample.py script\n df = h5py.File(data_fn, \"r\")\n imgs_validation = df[\"imgs_validation\"]\n msks_validation = df[\"msks_validation\"]\n img_indicies = range(len(imgs_validation))\n\n \"\"\"\n OpenVINO uses channels first tensors (NCHW).\n TensorFlow usually does channels last (NHWC).\n So 
we need to transpose the axes.\n \"\"\"\n input_data = imgs_validation\n msks_data = msks_validation\n return input_data, msks_data, img_indicies", "def load_data(args, path=\"./project_data/\", dataset=\"paper_author.txt\"):\r\n print('Loading {} dataset...'.format(dataset))\r\n\r\n paper_author = make_graph(path+dataset)\r\n print('The number of nodes :', paper_author.number_of_nodes())\r\n adj = nx.adjacency_matrix(paper_author)\r\n\r\n # build symmetric adjacency matrix\r\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\r\n # features = normalize(features)\r\n adj = normalize(adj + sp.eye(adj.shape[0]))\r\n adj = sparse_mx_to_torch_sparse_tensor(adj)\r\n\r\n if args.model == 'adj':\r\n features = adj\r\n\r\n elif args.model == 'node2vec':\r\n print('Already exist Node2vec file')\r\n file_name = './Node2vec_walk_%s_num_walks_%s_truncated.pickle' % (str(args.walk_length), str(args.num_walks))\r\n if os.path.isfile(file_name):\r\n with open(file_name, 'rb') as file:\r\n features = pickle.load(file)\r\n else:\r\n node2vec = Node2Vec(graph=paper_author, # target graph\r\n dimensions=int(args.feature_node), # embedding dimension\r\n walk_length=int(args.walk_length), # number of nodes in each walks\r\n p=2, # return hyper parameter\r\n q=1, # inout parameter, q값을 작게 하면 structural equivalence를 강조하는 형태로 학습됩니다.\r\n weight_key=None, # if weight_key in attrdict\r\n num_walks=int(args.num_walks), \r\n workers=4,\r\n )\r\n features = torch.tensor(node2vec.fit(window=10, min_count=0).wv.vectors)\r\n with open(file_name, 'wb') as file:\r\n pickle.dump(features, file)\r\n return adj, features # , labels, idx_train, idx_val, idx_test\r", "def main():\n parser = argparse.ArgumentParser(description=\"Convert a checkpoint file into a support sets and a reconstructor \"\n \"weights files\")\n parser.add_argument('--exp', type=str, required=True, help=\"set experiment's model dir (created by `train.py`)\")\n\n # Parse given arguments\n args = parser.parse_args()\n\n # Check structure of `args.exp`\n if not osp.isdir(args.exp):\n raise NotADirectoryError(\"Invalid given directory: {}\".format(args.exp))\n models_dir = osp.join(args.exp, 'models')\n if not osp.isdir(models_dir):\n raise NotADirectoryError(\"Invalid models directory: {}\".format(models_dir))\n checkpoint_file = osp.join(models_dir, 'checkpoint.pt')\n if not osp.isfile(checkpoint_file):\n raise FileNotFoundError(\"Checkpoint file not found: {}\".format(checkpoint_file))\n\n print(\"#. 
Convert checkpoint file into support sets and reconstructor weight files...\")\n\n # Load checkpoint file\n checkpoint_dict = torch.load(checkpoint_file)\n\n # Get checkpoint iteration\n checkpoint_iter = checkpoint_dict['iter']\n print(\" \\\\__Checkpoint iteration: {}\".format(checkpoint_iter))\n\n # Save support sets weights file\n print(\" \\\\__Save checkpoint support sets weights file...\")\n torch.save(checkpoint_dict['support_sets'], osp.join(models_dir, 'support_sets-{}.pt'.format(checkpoint_iter)))\n\n # Save reconstructor weights file\n print(\" \\\\__Save checkpoint reconstructor weights file...\")\n torch.save(checkpoint_dict['reconstructor'], osp.join(models_dir, 'reconstructor-{}.pt'.format(checkpoint_iter)))", "def read_data(path):\n with h5py.File(path, 'r') as hf:\t\n input_ = np.array(hf.get('input'))\n label_ = np.array(hf.get('label'))\n return input_, label_", "def read_szx_fmv_11(eps_file):\n raw_data = eps_file.scaled_mdr\n raw_unscaled = eps_file.mdr\n mphr = eps_file.mphr\n\n n_node_per_line = raw_data[\"LONGITUDE\"].shape[1]\n n_lines = raw_data[\"LONGITUDE\"].shape[0]\n n_records = raw_data[\"LONGITUDE\"].size\n\n data = {}\n metadata = {}\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n ascat_time = shortcdstime2jd(raw_data[\"UTC_LINE_NODES\"].flatten()[\"day\"],\n raw_data[\"UTC_LINE_NODES\"].flatten()[\"time\"])\n data[\"jd\"] = ascat_time[idx_nodes]\n\n metadata[\"spacecraft_id\"] = np.int8(mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(mphr[\"ORBIT_START\"])\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n\n for f in fields:\n metadata[f] = np.int16(mphr[f.upper()])\n\n fields = [\"sat_track_azi\"]\n for f in fields:\n data[f] = raw_data[f.upper()].flatten()[idx_nodes]\n\n fields = [(\"longitude\", long_nan), (\"latitude\", long_nan),\n (\"swath_indicator\", byte_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].flatten()\n valid = raw_unscaled[f.upper()].flatten() != nan_val\n data[f][~valid] = nan_val\n\n fields = [(\"sigma0_trip\", long_nan), (\"inc_angle_trip\", uint_nan),\n (\"azi_angle_trip\", int_nan), (\"kp\", uint_nan),\n (\"f_kp\", byte_nan), (\"f_usable\", byte_nan), (\"f_f\", uint_nan),\n (\"f_v\", uint_nan), (\"f_oa\", uint_nan), (\"f_sa\", uint_nan),\n (\"f_tel\", uint_nan), (\"f_land\", uint_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].reshape(n_records, 3)\n valid = raw_unscaled[f.upper()].reshape(n_records, 3) != nan_val\n data[f][~valid] = nan_val\n\n # modify longitudes from (0, 360) to (-180,180)\n mask = np.logical_and(data[\"longitude\"] != long_nan,\n data[\"longitude\"] > 180)\n data[\"longitude\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n mask = (data[\"azi_angle_trip\"] != int_nan) & (data[\"azi_angle_trip\"] < 0)\n data[\"azi_angle_trip\"][mask] += 360\n\n data[\"node_num\"] = np.tile((np.arange(n_node_per_line) + 1),\n n_lines).astype(np.uint8)\n data[\"line_num\"] = idx_nodes.astype(np.uint16)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n return data, metadata", "def data_input(self):\n path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'data'))\n if not os.path.isfile('{0}/{1}.csv'.format(path, self.data_file)):\n print 'Error: Dataset file is not exist.'\n exit()\n # Uplead Dataset.csv file.\n f = open('{0}/{1}.csv'.format(path, self.data_file), 'r')\n print 'Now uploading dataset File.....'\n f 
= list(f)\n # The Dataset contains heading, number of lines - heading\n self.number_of_VOCs = sum(1 for row in f)-1\n # Count number of columns, last column's value is empty, that is why -1.\n self.number_of_columns = len(f[0].split(',')) -1\n self.first_m_z = int(f[0].split(',')[3]) # find the first m/z value.\n self.last_m_z = int(f[0].split(',')[-2]) # find the last m/z value.\n print 'dataset includes ', self.number_of_VOCs, 'VOCs in all samples '\n print ('dataset includes ', self.number_of_columns, ' Columns, ',\n 'm/z values start from ', self.first_m_z,\n 'and end ', self.last_m_z)\n # Create a matrix with a shape of (number_of_VOCs X number_of_columns) filled with zeros.\n self.dataset = np.zeros((self.number_of_VOCs,\n self.number_of_columns))\n for line in range(1, len(f)):\n if int(float(f[line].strip().split(',')[0])) not in self.loaded_samples:\n self.loaded_samples.append(int(float(f[line].strip().split(',')[0])))\n for column in range(self.number_of_columns):\n self.dataset[line-1][column] = int(float(f[line].strip().split(',')[column]))", "def read_data_model(filename='data/data_model.pkl'):\n\n with open(filename, 'r') as pklfile:\n root = pkl.load(pklfile)\n\n return root", "def load_data(from_stored_data=False):\n\n if from_stored_data:\n #data_X = pickle.load(open(file_X, \"rb\"))\n data_X = pickle.load(open(\"x_sparse_small.p\", \"rb\"))\n #data_Y = pickle.load(open(file_Y, \"rb\"))\n data_Y = pickle.load(open(\"y_sparse_small.p\", \"rb\"))\n return data_X, data_Y\n\n data_X = None\n data_Y = None\n\n for num_subject in range(num_subjects):\n print \"subject :\", str(num_subject+1), \" processing started \"\n ind_data_x = None\n ind_data_y = None\n \n subject_data = sio.loadmat(\"data/data-science-P\" + str(num_subject + 1) + \".mat\")\n\n # big three headers\n meta = subject_data.get(\"meta\")\n info = subject_data.get(\"info\")[0]\n trials = subject_data.get(\"data\")\n\n # meta data\n nvoxels = meta[\"nvoxels\"][0][0][0][0]\n colToCoord = meta[\"colToCoord\"][0][0]\n coordToCol = meta[\"coordToCol\"][0][0]\n for num_trial in range(len(trials)):\n sys.stdout.write(str(num_trial)+\" \")\n sys.stdout.flush()\n # create feature vectors\n voxels = trials[num_trial][0][0]\n #feature_vec = np.zeros(dim_x * dim_y * dim_z)\n feature_vec = np.zeros((dim_x_half, dim_y, dim_z))\n for i in range(len(voxels)):\n # save only the left of the voxels to decrease the dimension of the vector \n colInfo = colToCoord[i, :]\n x = colInfo[0] - 1 # index in data starts from 1\n y = colInfo[1] - 1 # same\n z = colInfo[2] - 1 # same\n if x < dim_x_half:\n feature_vec[x][y][z] = voxels[i]\n #feature_vec[z * (dim_x * dim_y) + y * dim_x + x] = voxels[i]\n #feature_vec[z * (dim_x_half * dim_y) + y * dim_x_half + x] = voxels[i]\n feature_vec = feature_vec.flatten()\n feature_vec = sp.csr_matrix(feature_vec)\n\n # create label vectors\n trial_info = info[num_trial]\n cond_number = trial_info[1][0][0] - 2 # starts from 2 (2 ~ 13)\n word_number = trial_info[3][0][0] - 1 # starts from 1 (1 ~ 5)\n label_vec = np.zeros(num_conds * num_words_per_cond)\n label_vec[cond_number * num_words_per_cond + word_number] = 1\n \n # append data\n #data_X = sp.vstack((data_X, feature_vec)) if data_X is not None else feature_vec\n #data_Y = np.vstack((data_Y, label_vec)) if data_Y is not None else label_vec\n ind_data_x = sp.vstack((ind_data_x, feature_vec)) if ind_data_x is not None else feature_vec\n ind_data_y = np.vstack((ind_data_y, label_vec)) if ind_data_y is not None else label_vec\n\n # save ind_data files\n 
pickle.dump(ind_data_x, open(\"ind_\"+str(num_subject+1)+\"_x\", \"wb\"))\n pickle.dump(ind_data_y, open(\"ind_\"+str(num_subject+1)+\"_y\", \"wb\"))\n\n print \"subject :\", str(num_subject+1), \" processing done \"\n \n # save data file\n #pickle.dump(data_X, open(file_X, \"wb\"))\n #pickle.dump(data_Y, open(file_Y, \"wb\"))\n\n return data_X, data_Y", "def read():\n\n # load json and create model\n base_model = _model_builder.Network(0, model_type=\"load_model\")\n\n #load image and process\n digit = Image.open(\"./data/number.jpg\").convert(\"L\")\n digit = ImageOps.expand(digit,border=60,fill='black')\n digit = digit.resize((28, 28))\n\n #flatten the matrix (for input into MLP network todo:CNN)\n digit_flat = numpy.zeros((1, 784))\n counter = 0\n for j in range(0, 28):\n for i in range(0, 28):\n digit_flat[0][counter] = (digit.getpixel((i, j)))/255.0\n counter = counter+1\n\n #predict\n os.system('clear')\n base_model.predict(digit_flat)", "def read_data(path):\n with h5py.File(path, 'r') as hf:\n data = np.array(hf.get('data'))\n return data", "def read_data(self, workfile='workfile_tmp.p'):\n self.data = pickle.load(open(workfile, 'rb'))", "def read_data(self, workfile='workfile_tmp.p'):\n self.data = pickle.load(open(workfile, 'rb'))", "def Subtask4_pre_train_5():\n with open(PATH + 'pre_train_4_Subtask4.txt', encoding='utf-8') as fi:\n evi = eval(fi.read())\n\n train_data = np.load(PATH + 'pre_train_2_Subtask4.npy', allow_pickle=True).item()\n model = word2vec.KeyedVectors.load_word2vec_format(PATH + \"data/GoogleNews-vectors-negative300.bin\", binary=True)\n\n with open(PATH + 'pre_train_3_Subtask4.txt', encoding='utf-8') as f:\n document = eval(f.read())\n\n with open(PATH + 'traindata_Subtask4.txt', 'w') as fp:\n for data in train_data.items():\n claim = data[0]\n claim = re.sub(\"[-,.。:_=+*&^%$#@!?()<>/`';|]\", \"\", claim)\n claim = claim.split(' ')\n claim = list(filter(lambda x: x in model.vocab, claim))\n Vi = []\n for i in range(len(claim)):\n Vi.append(model[claim[i]])\n\n V = np.zeros(len(Vi[0]))\n for i in range(len(claim)):\n for j in range(len(Vi[0])):\n V[j] = V[j] + Vi[i][j]\n\n rms = 0\n for i in range(len(Vi[0])):\n rms += V[i] * V[i]\n rms = np.sqrt(rms / len(Vi[0]))\n\n for i in range(len(Vi[0])):\n V[i] = V[i] / rms\n V = V.astype(str).tolist()\n\n for doc in data[1]:\n lines = document[doc].split('\\n')\n for k in range(len(lines)):\n label = [data[0], doc, k]\n line = document[doc].split('\\n')[k]\n if line != str(k) + '\\t':\n line = line.replace(str(k) + '\\t', '')\n line = line.split('\\t')[0]\n line = re.sub(\"[-,.。:_=+*&^%$#@!?()<>/`';|]\", \"\", line)\n line = line.split(' ')\n line = list(filter(lambda x: x in model.vocab, line))\n if len(line) != 0:\n Vi = []\n for i in range(len(line)):\n Vi.append(model[line[i]])\n\n V1 = np.zeros(len(Vi[0]))\n for i in range(len(line)):\n for j in range(len(Vi[0])):\n V1[j] = V1[j] + Vi[i][j]\n\n rms = 0\n for i in range(len(Vi[0])):\n rms += V1[i] * V1[i]\n rms = np.sqrt(rms / len(Vi[0]))\n\n for i in range(len(Vi[0])):\n V1[i] = V1[i] / rms\n V1 = V1.astype(str).tolist()\n\n if label in evi:\n fp.write(' '.join(V) + ' ' + ' '.join(V1) + ' 1' + '\\n')\n else:\n fp.write(' '.join(V) + ' ' + ' '.join(V1) + ' 0' + '\\n')", "def read_word2vec_model():\n file_name = \"word2vec_model.txt\"\n # these are the pre-2018 lines to load a model:\n # from gensim.models.word2vec import Word2Vec\n # m = Word2Vec.load_word2vec_format(file_name, binary=False)\n \n # here are the post-2018 lines to load a model:\n from 
gensim.models import KeyedVectors\n print(\"Starting to load the model in \", file_name, \"...\")\n m = KeyedVectors.load_word2vec_format(file_name, binary=False)\n print(\"Model loaded.\\n\")\n\n print(\"The model built is\", m, \"\\n\")\n print(\"m.vocab has\", len(m.vocab), \"words\")\n ## The above line should print\n ## m.vocab has 43981 words\n\n print(\"Each word is a vector of size\", m.vector_size)\n ## which should tells us that each word is represented by a 300-dimensional vector\n\n print(\"\\nTry m.get_vector('hello') to see one...!\\n\")\n ## Once the model is built, it can't be changed without rebuilding it; we'll leave it. \n\n return m", "def read_data_test(path):\n with h5py.File(path, 'r') as hf:\n input_ = np.array(hf.get('data'))\n label_ = np.array(hf.get('label'))\n\t\n return input_, label_", "def get_model_data_from_files(self, oc):\r\n # Load model related files\r\n model_path = self.config['DATA_PATH'] + self.config['CUSTOMER_NAME'] + '/models/'\r\n\r\n features_file = model_path + self.task + '_' + str(oc) + '_features.txt'\r\n dummies_file = model_path + self.task + '_' + str(oc) + '_dummies.txt'\r\n model_file = model_path + self.task + '_' + str(oc) + '.joblib'\r\n\r\n if os.path.isfile(features_file) and os.path.isfile(dummies_file) and os.path.isfile(model_file):\r\n model = joblib.load(model_file)\r\n features = open(features_file, 'r', encoding=self.config['DATA_ENCODING']).read().rstrip('\\n').split(self.config['DATA_SEPARATOR'])\r\n dummies = open(dummies_file, 'r', encoding=self.config['DATA_ENCODING']).read().rstrip('\\n').split(self.config['DATA_SEPARATOR'])\r\n return (model, features, dummies)\r\n return (None, None, None)", "def read_nc(self, fname=None):\n fname = fname if fname else self.fname\n\n super(EigFile, self).read_nc(fname)\n\n with nc.Dataset(fname, 'r') as root:\n\n # nspin, nkpt, nband\n self.EIG = root.variables['Eigenvalues'][:,:,:] \n\n # nkpt, 3\n self.Kptns = root.variables['Kptns'][:,:]", "def _load_model(self):\n self._load_scaler('scaler.save')\n self._load_encoder('encoder0.save', 0)\n self._load_encoder('encoder1.save', 1)\n self._load_neural_network('model.json', 'model.h5')\n return", "def model_input(step=0.1):\n model = np.loadtxt('start_model.dat',dtype={'names': ('H', 'VP','VS','RHO','QP','QS',\\\n 'ETAP','ETAS','FREFP','FREFS'),'formats': ('f4', 'f4','f4','f4',\\\n 'f4','f4','f4','f4','f4','f4')}, skiprows=1)\n \n f = open('model96_input.tmp', 'w+')\n f.write('model_step96.in\\nIsotropic model\\n0\\n')\n d = np.loadtxt('cumul_depths.tmp')\n for i in np.arange(len(d)):\n for k, s in enumerate(np.arange(0,d[-1],step)):\n if s < d[i] and i==0:\n f.write('%s %s %s %s %s %s %s %s %s %s\\n'\n %(step, model['VP'][i], model['VS'][i], model['RHO'][i],\\\n model['QP'][i], model['QS'][i], model['ETAP'][i], model['ETAS'][i],\\\n model['FREFP'][i], model['FREFS'][i]))\n if i > 0:\n if s < d[i] and s > d[i-1]:\n f.write('%s %s %s %s %s %s %s %s %s %s\\n'\n %(step, model['VP'][i], model['VS'][i], model['RHO'][i],\\\n model['QP'][i], model['QS'][i], model['ETAP'][i], model['ETAS'][i],\\\n model['FREFP'][i], model['FREFS'][i]))\n f.close()\n os.system(\"mkmod96 < model96_input.tmp\")\n print (\">> Model_step96.in is ready... 
next step is comp_disp.bash\")\n print ('>> nlayers =', k)\n print (\">> Change nlayer in eigenfucntion_*.bash!!!!\")\n print (\">> mkmod96\")", "def main():\n\n # set up output directory and file\n output_file_folder = \"output/{}\".format(args.experiment_name)\n Path(output_file_folder).mkdir(parents=True, exist_ok=True)\n args.output_file_name = \"{}/{}.csv\".format(output_file_folder, args.model_name)\n args.checkpoint_name = \"{}/{}.pt\".format(output_file_folder, args.model_name + \"_best_model\")\n\n # read lcquad merged data\n if args.dataset_name == \"lcquad\":\n df_train = pd.read_csv(\"./data/lcquad/gold_db/train_gold.csv\")\n df_valid = pd.read_csv(\"./data/lcquad/gold_db/valid_gold.csv\")\n df_test = pd.read_csv(\"./data/lcquad/gold_db/lcquad_test_sorted.csv\")\n args.gold_file_name = \"lcquad/lcquad_gt_5000.csv\"\n # elif args.dataset_name == \"qald9\":\n # df_train = pd.read_csv(\"./data/qald-9/train_gold.csv\")\n # df_valid = pd.read_csv(\"./data/qald-9/valid_gold.csv\")\n # df_test = pd.read_csv(\"./data/qald-9/test_gold.csv\")\n # args.gold_file_name = \"qald/qald_data_gt.csv\"\n # elif args.dataset_name == \"webqsp\":\n # df_train = pd.read_csv(\"./data/webqsp/train_gold.csv\")\n # df_valid = pd.read_csv(\"./data/webqsp/valid_gold.csv\")\n # df_test = pd.read_csv(\"./data/webqsp/test_gold.csv\")\n # args.gold_file_name = \"webqsp/webqsp_data_gt.csv\"\n\n train_data = read_data_file(df_train, device, \"train\")\n valid_data = read_data_file(df_valid, device, \"valid\")\n test_data = read_data_file(df_test, device, \"test\")\n\n # train model and evaluate\n if args.model_name == \"pure\":\n model = PureNameLNN(args.alpha, -1, False)\n elif args.model_name == \"ctx\":\n model = None\n elif args.model_name == 'type':\n model = None\n elif args.model_name == \"pure_ctx\":\n model = None\n elif args.model_name == \"pure_type\":\n model = None\n elif args.model_name == \"ctx_type\":\n model = None\n\n model = model.to(device)\n print(\"model: \", args.model_name, args.alpha)\n\n # training\n train(model, train_data, valid_data, test_data, args.checkpoint_name, args.num_epoch, args.margin, args.learning_rate)", "def read_mb_file(self,idir='.',ifile=None, gmt=True, verbose=False):\n \n import numpy as np\n import os\n \n if gmt==True:\n gmt_file=idir+'/../maps/en_velo.gmt'\n if isinstance(gmt,str):\n gmt_file=gmt\n \n if gmt != False:\n self.read_lon_lat(gmt_file,verbose=verbose)\n \n if ifile is None:\n mb_file_basename= idir + '/mb_'+self.code+'_GPS.dat'\n else:\n mb_file_basename=ifile\n \n data_NEU = []\n for i in range(1,4):\n mb_file = mb_file_basename + str(i)\n\n # file\n self.ifile=os.path.abspath(mb_file)\n \n data=np.genfromtxt(mb_file,skip_header=4)\n \n # reshape to ensure a 2D array\n if len(data.shape)==1:\n data=data.reshape((1,data.shape[0]))\n \n\n\n data_NEU.append(data)\n\n if data_NEU[0].shape == data_NEU[1].shape == data_NEU[2].shape:\n self.data=np.zeros((data_NEU[0].shape[0],7))\n self.data[:,0]=data_NEU[0][:,0]\n self.data[:,1]=data_NEU[0][:,1]#*to_mm\n self.data[:,2]=data_NEU[1][:,1]#*to_mm\n self.data[:,3]=data_NEU[2][:,1]#*to_mm\n\n self.data[:,4]=data_NEU[0][:,2]#*to_mm\n self.data[:,5]=data_NEU[1][:,2]#*to_mm\n self.data[:,6]=data_NEU[2][:,2]#*to_mm\n\n else: \n print(\"!!! 
Error reading \",mb_file_basename,\" :*dat1, *dat2, *dat3 do not have the same length\")\n self.data = None", "def train (X, Y, modelfile='Q2/models/gaussianBinary.model', gamma=0.05, showProgress=False):\n tick = time.time()\n X = np.matrix(X)\n Y = np.matrix(Y).T\n\n m, n = X.shape\n\n # Find the Kernel Matrix KM\n KM = gaussianKM (X, X, gamma)\n\n # Parameters for CVXOPT\n YQ = Y * Y.T\n Q = np.multiply (YQ, KM)\n p = np.matrix(-np.ones((m, 1)))\n G = np.matrix(np.vstack( (-np.identity(m), np.identity(m)) ))\n h = np.matrix(np.vstack( (np.zeros((m,1)), np.ones((m,1))) ))\n A = Y.T\n b = 0\n \n # Running CVXOPT\n Q = cvx.matrix(Q)\n p = cvx.matrix(p)\n G = cvx.matrix(G)\n h = cvx.matrix(h)\n A = cvx.matrix(A, (1, m), 'd')\n b = cvx.matrix(b, (1,1), 'd')\n cvx.solvers.options['show_progress'] = showProgress\n sol = cvx.solvers.qp(P=Q, q=p, G=G, h=h, A=A, b=b)\n\n # Alphas\n alphas = np.matrix(sol['x'])\n\n # Finding the bias\n def findBias ():\n epsilon = 1e-5\n for idx, alp in enumerate(alphas):\n if (alp - 0 > epsilon and 1 - alp > epsilon):\n KM = gaussianKM (X[idx], X[idx], gamma)\n AlphaY = np.multiply (alphas, Y)\n AlphaY = np.repeat(AlphaY, 1, axis=1)\n KMalphaY = np.multiply (KM, AlphaY)\n KMalphaY = np.sum(KMalphaY, axis=0)\n b = float (Y[idx, 0] - KMalphaY)\n return b\n \n b = findBias ()\n\n # Finding the support vectors\n if (showProgress):\n epsilon = 1e-5\n sv = []\n for idx, alp in enumerate(alphas):\n if (alp - 0 > epsilon and 1 - alp > epsilon):\n sv.append(alp)\n with open('Q2/support-vectors/gaussian.vectors', 'w') as f:\n for v in sv:\n f.write(\"%.3f\\n\" % v)\n print (\"Number of Support Vectors: \", len(sv))\n # else:\n\n epsilon = 1e-5\n nSV = np.sum( (np.array(alphas) > epsilon) & (np.array(alphas) < 1 - epsilon), axis=0)\n print (\"Number of Support Vectors: \", nSV)\n\n # Saving the model\n model = (alphas, b)\n with open(modelfile, 'wb') as handle:\n pickle.dump(model, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n print (\"Time taken for gaussian CVXOPT training: \", time.time() - tick)", "def _load_dataset(self, path):\n\t\twhile True:\n\t\t\t\n\t\t\ttry:\n\t\t\t\tX_train = np.load(\"data/X_train.npy\")\n\t\t\t\tX_val = np.load(\"data/X_val.npy\")\n\t\t\t\tY_train = np.load(\"data/Y_train.npy\")\n\t\t\t\tY_val = np.load(\"data/Y_val.npy\")\n\t\t\t\tbreak\n\n\t\t\texcept FileNotFoundError:\n\n\t\t\t\tdata_temp = np.zeros((50000,64,64,3))\n\t\t\t\tlabel_temp = []\n\n\t\t\t\tfor i in range(5):\n\n\t\t\t\t\tfile = path + str(i+1)\n\t\t\t\t\twith open(file, 'rb') as fo:\n\t\t\t\t\t\ttemp_element = pickle.load(fo, encoding='bytes')\n\n\t\t\t\t\ttemp_data = temp_element[b'data']\n\t\t\t\t\tlabel_temp.extend(temp_element[b'labels'])\n\n\t\t\t\t\tfor j in range(10000):\n\t\t\t\t\t\tdata_temp[j+(i*10000)] = self._reshape(temp_data[j])\n\n\t\t\t\tlabel_temp = np.eye(10)[np.array(label_temp)]\n\n\t\t\t\tnp.random.seed(123)\n\t\t\t\tpermutations = list(np.random.permutation(50000))\n\t\t\t\tX = data_temp[permutations, :, : , :] \n\t\t\t\tY = label_temp[permutations, :]\n\t\t\t\tX_train = X[0:40000, :, :, :] \n\t\t\t\tY_train = Y[0:40000, :]\n\t\t\t\tX_val = X[40000:50000, :, :, :] \n\t\t\t\tY_val = Y[40000:50000, :]\n\n\t\t\t\tnp.save(\"./data/X_train\", X_train)\n\t\t\t\tnp.save(\"./data/X_val\", X_val)\n\t\t\t\tnp.save(\"./data/Y_train\", Y_train)\n\t\t\t\tnp.save(\"./data/Y_val\", Y_val)\n\t\t\t\tbreak\n\n\t\treturn X_train, X_val, Y_train, Y_val", "def load_model(self):\n saved_path = self.config.path_tmp / self.model.model_name\n if saved_path.exists():\n 
self.model.load_weights(str(saved_path / 'model.vec'))", "def load(self, model_path):\n # TODO: include new params based on ConfigEnum\n checkpoint = torch.load(model_path)\n\n self.image_size = checkpoint['image_size']\n self.device = checkpoint['device']\n self.fp16 = checkpoint['fp16']\n self.accumulate_grad_steps = checkpoint['accumulate_grad_steps']\n self.experiment_id = checkpoint['experiment_id']\n self.experiment_tag = checkpoint['experiment_tag']\n self.seed = checkpoint['seed']\n self.train_batch_size = checkpoint['train_batch_size']\n self.valid_batch_size = checkpoint['valid_batch_size']\n self.test_batch_size = checkpoint['test_batch_size']\n self.dataloader_num_workers = checkpoint['dataloader_num_workers']\n self.train_dataloader_shuffle = checkpoint['train_dataloader_shuffle']\n self.optimizer_type = checkpoint['optimizer_type']\n self.optimizer_params = checkpoint['optimizer_params']\n self.scheduler_type = checkpoint['scheduler_type']\n self.scheduler_params = checkpoint['scheduler_params']\n self.step_scheduler_after = checkpoint['step_scheduler_after']\n self.step_scheduler_metric = checkpoint['step_scheduler_metric']\n self.compute_train_loss_after = checkpoint['compute_train_loss_after']\n self.compute_train_metric_after = checkpoint['compute_train_metric_after']\n self.compute_valid_loss_after = checkpoint['compute_valid_loss_after']\n self.compute_valid_metric_after = checkpoint['compute_valid_metric_after']\n self.training_stopping_criteria = checkpoint['training_stopping_criteria']\n self.stopping_criteria_params = checkpoint['stopping_criteria_params']\n self.max_epoch = checkpoint['max_epoch']\n self.train_on_all_data = checkpoint['train_on_all_data']\n self.validate_after = checkpoint['validate_after']\n self.validation_steps = checkpoint['validation_steps']\n self.run_lr_range_test= checkpoint['run_lr_range_test']\n self.sleep_in_epochs = checkpoint['sleep_in_epochs']\n self.sleep_time = checkpoint['sleep_time']\n self.checkpoint_epochs = checkpoint['checkpoint_epochs']\n\n self._best_score = checkpoint['_best_score']\n self._current_score = checkpoint['_current_score']\n self._counter = checkpoint['_counter']\n self.metrics = checkpoint['metrics']\n self.current_epoch = checkpoint['current_epoch']\n self.current_train_batch = checkpoint['current_train_batch']\n self.current_valid_batch = checkpoint['current_valid_batch']\n self.num_train_samples = checkpoint['num_train_samples']\n self.num_train_iterations = checkpoint['num_train_iterations']\n self.checkpoint_snapshot = checkpoint['checkpoint_snapshot'] \n\n # initialize optimizer, scheduler, and gradient scaler\n self.configure_optimizers()\n self.configure_schedulers()\n \n if self.fp16:\n self.scaler = torch.cuda.amp.GradScaler()\n\n if self.model:\n self.model.load_state_dict(checkpoint['state_dict'])\n self.model.to(self.device)\n\n if self.optimizer:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n if self.scheduler:\n self.scheduler.load_state_dict(checkpoint['scheduler'])\n\n #if self.scaler:\n # self.scaler.load_state_dict(checkpoint['scaler'])", "def _load_training_data(self):\n self._save_training_data()", "def read_szx_fmv_13(eps_file):\n raw_data = eps_file.scaled_mdr\n raw_unscaled = eps_file.mdr\n mphr = eps_file.mphr\n\n n_node_per_line = raw_data[\"LONGITUDE\"].shape[1]\n n_lines = raw_data[\"LONGITUDE\"].shape[0]\n n_records = raw_data[\"LONGITUDE\"].size\n\n data = {}\n metadata = {}\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n ascat_time = 
shortcdstime2jd(raw_data[\"UTC_LINE_NODES\"].flatten()[\"day\"],\n raw_data[\"UTC_LINE_NODES\"].flatten()[\"time\"])\n data[\"jd\"] = ascat_time[idx_nodes]\n\n metadata[\"spacecraft_id\"] = np.int8(mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(mphr[\"ORBIT_START\"])\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n\n for f in fields:\n metadata[f] = np.int16(mphr[f.upper()])\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"abs_line_number\"\n ]\n\n for f in fields:\n data[f] = raw_data[f.upper()].flatten()[idx_nodes]\n\n fields = [(\"longitude\", long_nan), (\"latitude\", long_nan),\n (\"swath indicator\", byte_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].flatten()\n valid = raw_unscaled[f.upper()].flatten() != nan_val\n data[f][~valid] = nan_val\n\n fields = [(\"sigma0_trip\", long_nan), (\"inc_angle_trip\", uint_nan),\n (\"azi_angle_trip\", int_nan), (\"kp\", uint_nan),\n (\"num_val_trip\", ulong_nan), (\"f_kp\", byte_nan),\n (\"f_usable\", byte_nan), (\"land_frac\", uint_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].reshape(n_records, 3)\n valid = raw_unscaled[f.upper()].reshape(n_records, 3) != nan_val\n data[f][~valid] = nan_val\n\n # modify longitudes from (0, 360) to (-180,180)\n mask = np.logical_and(data[\"longitude\"] != long_nan,\n data[\"longitude\"] > 180)\n data[\"longitude\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n mask = (data[\"azi_angle_trip\"] != int_nan) & (data[\"azi_angle_trip\"] < 0)\n data[\"azi_angle_trip\"][mask] += 360\n\n data[\"node_num\"] = np.tile((np.arange(n_node_per_line) + 1),\n n_lines).astype(np.uint8)\n\n data[\"line_num\"] = idx_nodes.astype(np.uint16)\n\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n data[\"swath_indicator\"] = data.pop(\"swath indicator\")\n\n data[\"f_land\"] = data.pop(\"land_frac\")\n\n return data, metadata", "def load(self):\n results = pd.read_csv(f'data/binomial_experiment_{self.margin}_C_{self.C}.csv')\n\n self.minimal_R = np.zeros((len(self.Ms), self.estimations, len(self.epsilons)))\n for i, M in enumerate(self.Ms):\n for l in range(self.estimations):\n self.minimal_R[i,l,:] = np.array(results[results['M'] == M])[l,2:]\n return", "def execute(cf):\n\n ##Ports and parameters\n train_set = cf.get_input(\"train_set\") #training set. Typically even_file\n test_set = cf.get_input(\"test_set\") #test set. 
Typically odd_file\n WM1 = cf.get_input(\"WM1\")\n WM2 = cf.get_input(\"WM2\")\n WM3 = cf.get_input(\"WM3\")\n WM4 = cf.get_input(\"WM4\")\n WM5 = cf.get_input(\"WM5\")\n WM6 = cf.get_input(\"WM6\")\n WM7 = cf.get_input(\"WM7\")\n WM8 = cf.get_input(\"WM8\")\n WM9 = cf.get_input(\"WM9\")\n WM10 = cf.get_input(\"WM10\")\n WM11 = cf.get_input(\"WM11\")\n WM12 = cf.get_input(\"WM12\")\n WM13 = cf.get_input(\"WM13\")\n WM14 = cf.get_input(\"WM14\")\n WM15 = cf.get_input(\"WM15\")\n WM16 = cf.get_input(\"WM16\")\n WM17 = cf.get_input(\"WM17\")\n WM18 = cf.get_input(\"WM18\")\n WM19 = cf.get_input(\"WM19\")\n WM20 = cf.get_input(\"WM20\")\n WMdir = cf.get_input(\"WMdir\")\n WMdir2 = cf.get_input(\"WMdir2\")\n basefreqs = cf.get_input(\"BaseFrequencies\")\n ufemodel_path = cf.get_input(\"UFEmodel\")\n\n bestWM = cf.get_output(\"BestWM\")\n log_file = cf.get_output(\"log_file\")\n interm = cf.get_output(\"intermediate\")\n\n genome = cf.get_parameter('genome', 'string')\n motevo_path = cf.get_parameter('motevo_path', 'string')\n aligned = cf.get_parameter(\"aligned\", \"boolean\")\n\n os.mkdir(interm)\n\n\n\n # Read stuff in\n WMs = [i for i in[WM1, WM2, WM3, WM4, WM5, WM6, WM7, WM8, WM9, WM10, WM11, WM12, WM13, WM14, WM15, WM16, WM17, WM18, WM19, WM20] if i]\n\n if WMdir:\n WMs += [os.path.join(WMdir, wm) for wm in os.listdir(WMdir)]\n\n if WMdir2:\n WMs += [os.path.join(WMdir2, wm) for wm in os.listdir(WMdir2)]\n\n f = open(basefreqs)\n ATfreq = float(f.readline().strip().split()[1])\n GCfreq = float(f.readline().strip().split()[1])\n f.close()\n\n\n # Compute stuff: optimal priors and then likelihood of test set\n optpriors = []\n logliks = []\n\n for i, WM in enumerate(WMs):\n\n wmlen = len(open(WM).readlines())-4\n\n # 1. Fit prior on training set with EM\n tag = 'fitP_%i' %(i+1)\n params, sites, priors, loglikfile = giveMotevoParamFile(genome, wmlen, interm, tag, aligned, ufemodel_path, ATfreq, GCfreq, emprior=1, bgorder=0, bgprior=0.99)\n r = runMotevo(motevo_path, train_set, params, WM, interm, tag)\n if r != 0:\n print 'motevo failed ', tag\n sys.exit(1)\n\n # prior file:\n # WM_name final_prior nr_of_sites density\n # /import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/PipeLineSource/TESTRUN/NRF1_Z2/OUTPUT/NRF1_FgBg-runmotevoPG2_1/Logo 0.016554 635.008 0.251863\n # background 0.983446 37724.8 0.748137\n # UFEwm 0 0 0\n\n optprior = float(open(priors).readlines()[1].split()[1])\n bgprior=(1-optprior)\n print bgprior\n\n # 2. 
Compute log-likelihood on test set with optimal prior from training set and without EM\n tag = 'compLL_%i' %(i+1)\n params, sites, priors, loglikfile = giveMotevoParamFile(genome, wmlen, interm, tag, aligned, ufemodel_path, ATfreq, GCfreq, emprior=0, bgorder=0, bgprior=bgprior)\n runMotevo(motevo_path, train_set, params, WM, interm, tag)\n\n a = loadtxt(loglikfile, usecols=[1])\n ll = sum(a)\n\n logliks.append(ll)\n optpriors.append(optprior)\n\n print logliks\n\n\n\n #replace name in WM file with bestWM\n lines = open(WMs[argmax(logliks)]).readlines()\n lines[1] = 'NA BestWM\\n'\n bwm = open(bestWM, 'w')\n bwm.write(''.join(lines))\n\n\n l = open(log_file, 'w')\n\n l.write('WM_name\\tWM_path\\tlog_likelihood\\topt_prior\\n')\n\n names = ['WM_%i\\t%s\\t%.4f\\t%s' %(i+1, WMs[i], logliks[i], optpriors[i]) for i in arange(len(WMs))]\n\n l.write('\\n'.join(names))\n l.close()\n\n\n return 0", "def test_large_flmb(self):\n test_files_218 = glob.glob(INPUT_FLMB_PATH + '/unit_363-2013-218*.mdd')\n\n mdd.procall(test_files_218)\n\n data_orig = self.read_full_file('node59p1.dat')\n\n # two status files from different controllers, 12371 and 12365\n data_out_71 = self.read_full_file('node59p1_0.status_1237101.dat')\n self.check_sio_type(data_out_71, ['CS', 'PS'])\n data_out_65 = self.read_full_file('node59p1_0.status_1236501.dat')\n self.check_sio_type(data_out_65, ['CS', 'PS'])\n data_out = data_out_71\n data_out += data_out_65\n\n data_adcps = self.read_full_file('node59p1_0.adcps_1237111.dat')\n self.check_sio_type(data_adcps, ['AD'])\n data_out += data_adcps\n\n data_ctdmo = self.read_full_file('node59p1_0.ctdmo_1237100.dat')\n self.check_sio_type(data_ctdmo, ['CT', 'CO'])\n data_out += data_ctdmo\n\n data_dosta = self.read_full_file('node59p1_0.dosta_1236501.dat')\n self.check_sio_type(data_dosta, ['DO'])\n data_out += data_dosta\n\n data_flort = self.read_full_file('node59p1_0.flort_1236501.dat')\n self.check_sio_type(data_flort, ['FL'])\n data_out += data_flort\n\n data_phsen = self.read_full_file('node59p1_0.phsen_1236501.dat')\n self.check_sio_type(data_phsen, ['PH'])\n data_out += data_phsen\n\n if not TestSioUnpack.compare_sio_matches(data_orig, data_out):\n self.fail(\"Failed sio block compare\")\n\n test_files = glob.glob(INPUT_FLMB_PATH + '/unit_363-2013-205*.mdd')\n test_files_217 = glob.glob(INPUT_FLMB_PATH + '/unit_363-2013-217*.mdd')\n test_files_219 = glob.glob(INPUT_FLMB_PATH + '/unit_363-2013-219*.mdd')\n\n test_files.extend(test_files_217)\n test_files.extend(test_files_219)\n\n mdd.procall(test_files)\n\n data_out = self.compare_node59(1, data_out)\n\n test_files = glob.glob(INPUT_FLMB_PATH + '/unit_363-2013-233*.mdd')\n test_files_231 = glob.glob(INPUT_FLMB_PATH + '/unit_363-2013-231*.mdd')\n\n test_files.extend(test_files_231)\n\n mdd.procall(test_files)\n\n self.compare_node59(2, data_out)", "def load_from_disk(self, file_name = \"vehicle_classifier.pkl\"):\n self.classifier.load_from_disk(file_name)", "def read_file(self, filename=None):\n print(f'reading file')\n\n if filename is None:\n filename = self.model_file\n\n with open(filename, 'r') as f:\n # count number of lines\n npts_file = sum([1 for line in f])\n\n # go back to start and read second line in file to get number of variables\n f.seek(0)\n f.readline()\n l = f.readline()\n nvars_file = int(l.split(' ')[-1])\n\n # subtract header rows\n npts_file -= (nvars_file + 2)\n\n print(f'{nvars_file} variables found in the initial model file')\n print(f'{npts_file} points found in the initial model file')\n\n var_idx_map 
= {}\n\n # read in the names of the variables\n for i in range(nvars_file):\n var_name_file = f.readline().strip()\n if var_name_file.lower() == 'n':\n var_name_file = 'neut'\n elif var_name_file == 'p':\n var_name_file = 'prot'\n\n # create map of file indices to model indices\n try:\n var_idx_map[self.idx[var_name_file]] = i+1\n except KeyError:\n pass\n\n base_r = np.zeros(npts_file)\n base_state = np.zeros((npts_file, self.nvar))\n\n # read in model data\n for i, line in enumerate(f):\n variables = [float(v) for v in line.split(' ')]\n\n base_r[i] = variables[2]\n\n for j in range(self.nvar):\n if j in var_idx_map:\n base_state[i, j] = variables[var_idx_map[j]]\n\n return npts_file, base_r, base_state", "def _load_model(self):\n with open(self.filepath, 'rb') as file:\n self.cmodel = pickle.load(file)", "def load_e_form():\n path = os.path.join(DATA_DIR, \"eform-materialsproject-85014.csv\")\n df = pd.read_csv(path, index_col=\"mpid\")\n return df", "def main(workdir):\n dir = os.path.expanduser(workdir)\n \n #read the .dat file\n f = open('{}smi.dat'.format(dir))\n par = imp.load_source('par', '', f)\n \n #make a sdf file for visualization\n output = pybel.Outputfile(\"sdf\", dir + \"species.sdf\",overwrite=True)\n for name in par.smiles:\n smi = par.smiles[name]\n obmol = pybel.readstring(\"smi\",smi)\n output.write(obmol)\n output.close()\n \n #list with the jobs that need to be done\n jobs = []\n \n #iterate the input files\n for name in par.smiles:\n #name = input_file.replace('.inp','') #name of the calculation\n test_dir = dir + name #location where the calculations will be done\n if not os.path.exists(test_dir):\n os.mkdir(test_dir)\n \n #copy the input file to the working directory\n write_input_file(par,name,par.smiles[name],test_dir + '/input.inp')\n job = workdir + name + '/'\n jobs.append(job)\n \n run_threads(jobs, 'eric', max_running = 3)", "def unpack(self, buff, verbose=0):\n\n\n # See https://docs.python.org/3/library/struct.html#struct.pack\n # for struck pack format\n\n # Local methods to unpack numbers in little-endian format\n idx={'x':0}\n\n def read_uint8():\n idx['x']+=1\n return struct.unpack('<B', buf[idx['x']-1:idx['x']])[0]\n def read_uint32():\n idx['x']+=4\n return struct.unpack('<I', buf[idx['x']-4:idx['x']])[0]\n def read_float32():\n idx['x']+=4\n return struct.unpack('<f', buf[idx['x']-4:idx['x']])[0]\n\n # Return empty model in case the byte-array contains no information\n if len(buf) == 0:\n return None\n\n # Read global stddev and mean (not used in RQRMI version 1.1)\n _=read_float32()\n _=read_float32()\n\n num_of_stages=read_uint32()\n _log(verbose, 'Num of stages: %d' % num_of_stages)\n\n # Preallocate array\n trained_rqrmi=[None for _ in range(num_of_stages)]\n\n for s in range(num_of_stages):\n\n # Read the current stage\n num_of_models=read_uint32()\n\n _log(verbose, '\\nStage %d num of models: %d' % (s, num_of_models))\n\n # Preallocate net_list\n net_list=[None for _ in range(num_of_models)]\n\n for m in range(num_of_models):\n # Read version\n version=read_uint8()\n if version==0:\n _log(verbose, '\\nSkipping model <%d,%d>: model not compiled' % (s, m))\n continue\n elif version!=2:\n _log(verbose, '\\nUnsupported version for model <%d,%d>' % (s, m))\n continue\n\n _log(verbose, '\\nLoading model <%d, %d>: ' % (s,m))\n\n # Read model parameters\n mu=read_float32()\n sig=read_float32()\n fac=read_float32()\n omin=read_float32()\n num_of_layers=read_uint32()\n _log(verbose, 'layers: %d, ' % num_of_layers)\n\n # Preallocate net values\n 
net_values=[None for _ in range(2*num_of_layers-1)]\n\n # Read network structure\n structure=[None for _ in range(num_of_layers)]\n for l in range(num_of_layers):\n structure[l]=read_uint32()\n\n # Layer 0 bias\n net_values[0]=np.empty(structure[0])\n\n # Preallocate all other layers\n for l in range(1, num_of_layers):\n net_values[2*l-1]=np.empty(structure[l]) # Layer bias\n net_values[2*l-0]=np.empty([structure[l-1], structure[l]]) # Layer weights\n\n _log(verbose, 'structure: [%s]' % ','.join([str(x) for x in structure]))\n\n # Read values of first layer\n net_values[0][0]=read_float32()\n _=read_float32() # First layer weight is one (always)\n\n # Read values\n for l in range(1, num_of_layers):\n # Read bias\n for i in range(structure[l]):\n net_values[2*l-1][i]=read_float32()\n # Read weights\n for y in range(structure[l-1]):\n for x in range(structure[l]):\n net_values[2*l][y,x]=read_float32()\n\n # Update stage's net list\n net_list[m]=(mu, sig, fac, omin, net_values)\n\n # Update output with stage\n trained_rqrmi[s] = net_list\n\n # Read the maximum error of each last stage submodel\n self.error_list = []\n for e in range(len(self.trained_rqrmi[-1])):\n self.error_list.append(read_uint32())\n\n _log(verbose, '\\n')\n self.trained_rqrmi = trained_rqrmi", "def load_model(self, path):\n self._saver.restore(self._sess, path + '/model.ckp')\n pkl_file = open(path + '/som.pkl', 'rb')\n restored = pickle.load(pkl_file)\n pkl_file.close()\n self._m = restored['_m']\n self._n = restored['_n']\n self._neighbourhood = restored['_neighbourhood']\n # self._topography = restored['_topography']\n self._num_iterations = restored['_num_iterations']\n self._Wts = restored['_Wts']\n self._locations = restored['_locations']\n self._learned = restored['_learned']\n self._centroid_grid = restored['_centroid_grid']\n self.abnormal_dist = restored['abnormal_dist']\n\n print(\"Model restored from path: \" + path)", "def learn(filePath):\n filename = filePath.stem\n processedJAFFE = load(str(filePath))\n processedDF = pd.DataFrame(processedJAFFE)\n processedDF.columns = ['name', 'data', 'emotion']\n processedDF = processedDF.sort_values(by=['name', 'emotion'])\n grouped = processedDF.groupby(['name', 'emotion'])\n train = grouped.nth([0, 1])\n test = grouped.nth([2, 3, 4])\n\n yTrain = train.index.get_level_values(1).tolist()\n xTrain = train.values.ravel().tolist()\n yTest = test.index.get_level_values(1).tolist()\n xTest = test.values.ravel().tolist()\n\n parameters = {\n 'C': [\n 1.00E-12, 1.00E-11, 1.00E-10, 1.00E-09, 1.00E-08, 1.00E-07, 1.00E-06,\n 1.00E-05, 1.00E-04, 1.00E-03, 2.00E-03, 1.00E-02, 1.00E-01, 1.00,\n 1.00E+01, 1.00E+02, 1.00E+03, 1.00E+04, 1.00E+05\n ],\n 'gamma': [\n 1.00E00,\n 1.00E-01,\n 1.00E-02,\n 1.00E-03,\n 5.00E-04, 2.00E-04, 1.50E-04, 1.10E-04, 1.05E-04, 1.00E-04,\n 9.50E-05, 9.00E-05, 7.00E-05, 5.00E-05, 1.90E-05, 1.00E-05,\n 1.00E-06,\n 1.00E-07,\n ],\n }\n\n models = []\n models.append(['gamma \\\\ C', 1.00E-12, 1.00E-11, 1.00E-10, 1.00E-09, 1.00E-08,\n 1.00E-07, 1.00E-06, 1.00E-05, 1.00E-04, 1.00E-03, 2.00E-03, 1.00E-02,\n 1.00E-01, 1.00, 1.00E+01, 1.00E+02, 1.00E+03, 1.00E+04, 1.00E+05 ])\n gridTimeStart = time()\n numIteration = len(parameters['gamma']) * len(parameters['C'])\n iteration = 0\n meanTime = 0\n for gamma in parameters['gamma']:\n row = [gamma]\n for C in parameters['C']:\n print('C = %s \\t gamma = %s'%(C, gamma))\n timeStart = time()\n svc = OneVsRestClassifier(SVC(random_state=0, decision_function_shape='ovr',\n C=C, kernel='rbf', gamma=gamma), n_jobs=4)\n 
svc.fit(xTrain, yTrain)\n yTrue, yPred = yTest, svc.predict(xTest)\n yTrue = np.array(yTrue, dtype=np.unicode_)\n yPred = np.array(yPred, dtype=np.unicode_)\n correct = np.sum(yTrue == yPred)\n \n print(\"accuracy: %d/%d = \"%(correct, len(yTrue)),\n D('%.2f'%(correct/len(yTrue)*100)))\n row.append(D('%.2f'%(correct/len(yTrue)*100)))\n \n iterTime = time()-timeStart\n iteration = iteration + 1\n meanTime = meanTime * (iteration-1)/iteration + iterTime/iteration\n remainingTime = (numIteration-iteration)*meanTime\n print('--------------------------(%d sec)--remaining: %s'%\n (iterTime, str(timedelta(seconds=int(remainingTime)))))\n models.append(row)\n gridTime = time() - gridTimeStart\n gridTime = timedelta(seconds=int(gridTime))\n print('time: %s'%str(gridTime))\n print('saving file: %s.csv'%filename)\n with open('../csv/%s.csv'%filename, 'w') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerows(models)", "def download_data(dev_mode: str, model: word2vec.Word2Vec) -> (np.ndarray, np.ndarray):\n assert dev_mode.lower() == 'false' or dev_mode.lower() == 'true'\n \n if dev_mode.lower() == 'false':\n print('Using Actual Data...')\n data_path = os.path.join(args.data_dir, 'HIV.csv')\n df = pd.read_csv(data_path)\n df['sentence'] = df.apply(lambda x: MolSentence(mol2alt_sentence(Chem.MolFromSmiles(x['smiles']), 1)), axis=1)\n df['mol2vec'] = [DfVec(x) for x in sentences2vec(df['sentence'], model, unseen='UNK')]\n \n # convert dataframe into numpy array for training\n X = np.array([x.vec for x in df['mol2vec']])\n y = np.array(df['HIV_active'].astype(int))\n else:\n # use example data set\n data_path = os.path.join(args.data_dir, 'ames.sdf')\n df = PandasTools.LoadSDF(data_path)\n df['sentence'] = df.apply(lambda x: MolSentence(mol2alt_sentence(x['ROMol'], 1)), axis=1)\n df['mol2vec'] = [DfVec(x) for x in sentences2vec(df['sentence'], model, unseen='UNK')]\n \n # convert dataframe into numpy array for training\n X = np.array([x.vec for x in df['mol2vec']])\n y = np.array(df['class'].astype(int))\n \n return X,y", "def retrieve_additional_files(input_qchem, data_fchk, work_dir, scratch_read_level=0):\n\n additional_data = {}\n\n natom = len(input_qchem.molecule.get_coordinates())\n file_list = os.listdir(work_dir)\n\n # OLD_DIMENSIONS\n if '819.0' in file_list:\n with open(work_dir + '819.0', 'r') as f:\n data = np.fromfile(f, dtype=np.int32)\n norb_alpha, norb_beta = data[0:2]\n norb = norb_alpha\n nbas = norb # assumption\n else:\n norb = np.shape(data_fchk['coefficients']['alpha'])[0]\n nbas = np.shape(data_fchk['coefficients']['alpha'])[1]\n\n\n # MO_COEFS (Already in fchk) in internal order\n if '53.0' in file_list and 'coefficients' in data_fchk:\n with open(work_dir + '53.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n mo_alpha = data[:norb*nbas].reshape(-1, norb).tolist()\n mo_beta = data[norb*nbas: 2*norb_beta*nbas].reshape(-1, norb_beta).tolist()\n # additional_data['coefficients_internal'] = {'alpha': mo_alpha, 'beta': mo_beta}\n\n # obtain the order indices between fchk order and Q-Chem internal order of basis functions\n diff_square = get_sdm(data_fchk['coefficients']['alpha'], mo_alpha)\n\n # get non-repeating indices\n indices = []\n for row in diff_square.T:\n for i in np.argsort(row):\n if i not in indices:\n indices.append(int(i))\n break\n\n # indices = np.argmin(diff_square, axis=0).tolist()\n\n # store q-chem index order for later use (e.g guess)\n data_fchk['coefficients']['qchem_order'] = indices\n else:\n indices = list(range(nbas))\n\n # FOCK_MATRIX\n if 
'58.0' in file_list:\n with open(work_dir + '58.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n fock_alpha = data[:nbas*nbas].reshape(-1, nbas)\n fock_beta = data[nbas*nbas: 2*nbas*nbas].reshape(-1, nbas)\n\n # set basis functions in fchk order\n fock_alpha = fock_alpha[:, indices]\n fock_alpha = fock_alpha[indices, :]\n fock_beta = fock_beta[:, indices]\n fock_beta = fock_beta[indices, :]\n\n additional_data['fock_matrix'] = {'alpha': fock_alpha.tolist(), 'beta': fock_beta.tolist()}\n\n if scratch_read_level == -1:\n # FILE_ENERGY (Not really worth to read it)\n if '99.0' in file_list:\n with open(work_dir + '99.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n\n # FILE_DENSITY_MATRIX (Already in fchk)\n if '54.0' in file_list:\n with open(work_dir + '54.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n density_alpha = data[:nbas*nbas].reshape(-1, nbas)\n density_beta = data[nbas*nbas: 2*nbas*nbas].reshape(-1, nbas)\n # set basis functions in fchk order\n density_alpha = density_alpha[:, indices]\n density_alpha = density_alpha[indices, :]\n density_beta = density_beta[:, indices]\n density_beta = density_beta[indices, :]\n additional_data['scf_density_internal'] = {'alpha': density_alpha.tolist(), 'beta': density_beta.tolist()}\n\n # HESSIAN_MATRIX\n if '132.0' in file_list:\n with open(work_dir + '132.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n hessian = data.reshape(-1, natom*3)\n additional_data['hessian'] = hessian.tolist()\n\n # AO_INTS_DEBUG\n if '21.0' in file_list:\n with open(work_dir + '21.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n ao_integrals = data.reshape(-1, nbas, nbas, nbas)\n\n # set basis functions in fchk order\n ao_integrals = ao_integrals[:, :, :, indices]\n ao_integrals = ao_integrals[:, :, indices, :]\n ao_integrals = ao_integrals[:, indices, :, :]\n ao_integrals = ao_integrals[indices, :, :, :]\n\n additional_data['ao_integrals'] = ao_integrals.tolist()\n\n if scratch_read_level > 0:\n # FILE_RAS_AMP\n if '704.0' in file_list:\n with open(work_dir + '705.0', 'r') as f:\n ras_energies = np.fromfile(f, dtype=float)\n n_ras_roots = len(ras_energies)\n\n with open(work_dir + '704.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n ras_amplitudes = data.reshape(n_ras_roots, -1)\n additional_data['ras_amplitudes'] = ras_amplitudes.tolist()\n\n return additional_data", "def test_downloadModel(self):\n\t\tmodel_in = \"\"\n\t\tquery_localdirs = cancerscope.get_models.findmodel(os.path.dirname(cancerscope.__file__), \"v1_rm500\")\n\t\tif query_localdirs is not None:\n\t\t\tmodel_in = query_localdirs[\"v1_rm500\"]\n\t\telse:\n\t\t\tmodel_in = cancerscope.get_models.downloadmodel(model_label=\"v1_rm500\")\n\t\t\n\t\tself.assertTrue(os.path.isdir(model_in))\n\t\tself.assertTrue(os.path.exists(\"\".join([model_in, \"/lasagne_bestparams.npz\"])))\n\t\t\n\t\t\"\"\"Test if model can be setup correctly\"\"\"\n\t\tlmodel = cancerscope.scopemodel(model_in)\n\t\tlmodel.fit()\n\t\n\t\tself.assertEqual(len(lmodel.features), 17688)", "def read(f):\n \n if isinstance(f, basestring):\n # If the input is a string, treat as file name\n with open(f) as fh: # Ensure file is closed\n return read(fh) # Call again with file object\n \n # First line contains the date\n date = f.readline()\n if not date:\n raise IOError(\"Cannot read from input file \"+str(filename))\n \n # Second is description\n desc = f.readline()\n \n token = file_numbers(f)\n \n # Third contains number of mesh points\n try:\n npsi = int(token.next())\n ntheta = int(token.next())\n isym = 
int(token.next())\n except StopIteration:\n raise IOError(\"Unexpected end of file while reading grid size\")\n except ValueError:\n raise IOError(\"Third line should contain npsi, ntheta and isym\")\n \n # Check values\n if (isym < 0) or (isym > 1):\n raise IOError(\"isym must be either 0 or 1\")\n if (npsi < 1) or (ntheta < 1):\n raise IOError(\"Invalid npsi=\"+str(npsi)+\" or ntheta=\" + str(ntheta))\n \n # Read normalisation factors\n\n try:\n rcnt = float(token.next())\n xma = float(token.next())\n zma = float(token.next())\n btor = float(token.next())\n curtot = float(token.next())\n eaxe = float(token.next())\n dnorm = float(token.next())\n except:\n raise IOError(\"Couldn't read normalisation factors\")\n \n def read_array(n, name=\"Unknown\"):\n data = np.zeros([n])\n try:\n for i in np.arange(n):\n data[i] = float(token.next())\n except:\n raise IOError(\"Failed reading array '\"+name+\"' of size \", n)\n return data\n\n def read_2d(nx, ny, name=\"Unknown\"):\n data = np.zeros([nx, ny])\n for i in np.arange(nx):\n data[i,:] = read_array(ny, name+\"[\"+str(i)+\"]\")\n return data\n\n # Read 1D arrays\n psiflux = read_array(npsi, \"psiflux\")\n fnorm = read_array(npsi, \"fnorm\")\n ffpnorm = read_array(npsi, \"ffpnorm\")\n ponly = read_array(npsi, \"ponly\")\n pponly = read_array(npsi, \"pponly\")\n qsf = read_array(npsi, \"qsf\")\n d = read_array(npsi, \"d\")\n \n dpdz = read_array(ntheta, \"dpdz\")\n dpdr = read_array(ntheta, \"dpdr\")\n \n # 2D arrays\n \n xnorm = read_2d(ntheta, npsi, \"xnorm\")\n znorm = read_2d(ntheta, npsi, \"znorm\")\n \n # Try to read Br and Bz (may be present)\n try:\n Br = read_2d(ntheta, npsi, \"Br\")\n Bz = read_2d(ntheta, npsi, \"Bz\")\n except:\n Br = Bz = None\n \n ny = ntheta\n\n if isym == 1:\n # Fill in values for up-down symmetric case\n print(\"Grid is up-down symmetric. Reflecting grid about midplane\")\n ny = tsize = 2*(ntheta - 1) + 1\n \n def reflect(data, mapfunc = lambda x:x):\n \"\"\" Reflect a variable about midplane\n Optionally supply a mapping function\"\"\"\n data2 = np.zeros([tsize, npsi])\n # Copy the original data\n for i in np.arange(ntheta):\n data2[i,:] = data[i,:]\n # Now fill in the remainder\n for i in np.arange(ntheta, tsize):\n t0 = tsize - 1 - i\n data2[i,:] = mapfunc(data[t0,:])\n return data2\n \n xnorm = reflect(xnorm)\n znorm = reflect(znorm, lambda x: 2.*zma - x) # Reflect about zma\n if Br != None:\n Br = reflect(Br, lambda x:-x) # Br reverses\n if Bz != None:\n Bz = reflect(Bz) # Bz remains the same\n theta = tsize\n\n # Make sure we have Br, Bz and Bpol\n\n if (Br == None) or (Bz == None):\n # Calculate Bpol from psi then Br and Bz from Bpol\n # Use dpsi = R*Bp dx (for now)\n Bpol = np.zeros([ny, npsi])\n \n def deriv(f):\n n = np.size(f)\n dfdi = np.zeros(n)\n dfdi[1:-1] = (f[2:n] - f[0:-2])/2. 
# Central difference in the middle\n dfdi[0] = f[1] - f[0]\n dfdi[-1] = f[-1] - f[-2]\n return dfdi\n \n for i in np.arange(ntheta):\n drdi = deriv(xnorm[i, :])\n dzdi = deriv(znorm[i, :])\n dldi = sqrt(drdi**2 + dzdi**2) # Arc length\n dpsidi = deriv(psiflux)\n \n Bpol[i, :] = dpsidi / (dldi * xnorm[i,:])\n else:\n Bpol = np.sqrt(Br**2 + Bz**2)\n \n # Calculate toroidal field\n Btor = fnorm / xnorm\n \n #########################################\n # Create a dictionary of values to return\n # \n # Need to transpose 2D arrays to [psi, theta] \n # to be consistent with elite inputs\n \n var = {\"npsi\":npsi, \"npol\":ny, # Sizes\n \n \"psi\":psiflux,\n \"f(psi)\":fnorm,\n \"p\":ponly,\n \n \"R\": np.transpose(xnorm),\n \"Z\": np.transpose(znorm),\n\n \"Bp\":np.transpose(Bpol),\n \"Bt\":np.transpose(Btor),\n\n \"q\":qsf,\n\n \"ffprime\":ffpnorm,\n \"pprime\":pponly}\n\n if Br != None:\n var['Br'] = np.transpose(Br)\n if Bz != None:\n var['Bz'] = np.transpose(Bz)\n \n return var", "def _load20news_miao():\n DIR = os.path.dirname(os.path.realpath(__file__)).split('vae_sparse')[0]+'vae_sparse/optvaedatasets'\n DIR += '/20news_miao'\n h5file = DIR+'/miao.h5'\n if not os.path.exists(h5file):\n flen = len(open(DIR+'/vocab').readlines())\n print 'DIM: ',flen\n np.random.seed(1)\n TRAIN_VALID_MAT = readSparseFile(DIR+'/train.feat', flen, zeroIndexed=False)\n idx = np.random.permutation(TRAIN_VALID_MAT.shape[0])\n VALIDMAT = TRAIN_VALID_MAT[idx[:500]]\n TRAINMAT = TRAIN_VALID_MAT[idx[500:]]\n TESTMAT = readSparseFile(DIR+'/test.feat', flen, zeroIndexed=False) \n saveSparseHDF5(TRAINMAT,'train', h5file)\n saveSparseHDF5(VALIDMAT,'valid', h5file)\n saveSparseHDF5(TESTMAT, 'test' , h5file)\n dset = {}\n dset['vocabulary']= [k.strip().split(' ')[0] for k in open(DIR+'/vocab').readlines()]\n dset['train'] = loadSparseHDF5('train',h5file)\n dset['valid'] = loadSparseHDF5('valid',h5file)\n dset['test'] = loadSparseHDF5('test',h5file)\n dset['dim_observations'] = dset['train'].shape[1]\n dset['data_type'] = 'bow'\n return dset" ]
[ "0.5908623", "0.58457416", "0.583959", "0.5752228", "0.56917495", "0.5652151", "0.56139976", "0.55662304", "0.5519502", "0.5518119", "0.550503", "0.54647243", "0.54624826", "0.54254407", "0.5418097", "0.53947306", "0.5358178", "0.53448343", "0.53399765", "0.53206843", "0.53073", "0.5301678", "0.52996284", "0.5296507", "0.52858573", "0.52767074", "0.5246619", "0.5240502", "0.52327335", "0.5231075", "0.52224874", "0.52077746", "0.5204285", "0.51867676", "0.51730835", "0.51704437", "0.51701117", "0.51604", "0.5159715", "0.51580757", "0.51524884", "0.51451796", "0.5136827", "0.5135582", "0.5130688", "0.51291054", "0.5127035", "0.512051", "0.5120081", "0.51093376", "0.5107225", "0.5103243", "0.5103044", "0.5088394", "0.50857854", "0.50809836", "0.5077965", "0.5069564", "0.5063201", "0.5063101", "0.506226", "0.5061864", "0.5058287", "0.505343", "0.5030379", "0.50276643", "0.50170946", "0.5015198", "0.5014736", "0.5014736", "0.5010809", "0.5009074", "0.50066906", "0.5006632", "0.4993462", "0.4988966", "0.49875498", "0.49814337", "0.49809217", "0.49786565", "0.4973392", "0.497145", "0.49701893", "0.49647358", "0.49604714", "0.49564463", "0.49561447", "0.4954181", "0.4953616", "0.49535322", "0.49524146", "0.49474093", "0.49467525", "0.4945772", "0.49454495", "0.49364108", "0.49331895", "0.49290755", "0.4927107", "0.49155453", "0.4914306" ]
0.0
-1
Read the VQE convergence data for the mini BMN model from disk
def plot_convergence( optimizers: list = ["COBYLA", "SLSQP", "L-BFGS-B", "NELDER-MEAD"], g2N: float = 0.2, maxit: int = 10000, varform: list = ["ry"], depth: int = 3, nrep: int = 10, dataprefix: str = "data/miniBMN", datasuffix: str = "h5", figprefix: str = "figures/miniBMN", ht: float = 0.0, up: int = 1000, ): # setup parameters params = dict() params["l"] = str(g2N).replace(".", "") params["d"] = depth params["v"] = "-".join(varform) params["m"] = maxit params["n"] = nrep params["f"] = dataprefix params["s"] = datasuffix assert type(optimizers).__name__ == "list" # collect data result = collect_data(optimizers, params) # get best runs gs = dict() for r in optimizers: gs[r] = result.loc[r].groupby("rep").apply(min).energy gsdf = pd.DataFrame.from_dict(gs, dtype=float) print(gsdf.describe().T[["min", "max", "mean", "std"]]) # Plot # select the best runs for each optimizer fig, ax = plt.subplots() for o in optimizers: result.loc[o, gsdf[o].idxmin()].plot( x="counts", y="energy", xlim=[0, up], label=o, ax=ax ) ax.axhline(ht, c="k", ls="--", lw="2", label="HT") ax.set_xlabel("iterations") ax.set_ylabel("VQE energy") ax.legend(loc="upper right") filename = f"{figprefix}_l{params['l']}_convergence_{params['v']}_depth{params['d']}_nr{params['n']}_max{params['m']}_xlim{up}" plt.savefig(f"{filename}.pdf") plt.savefig(f"{filename}.png") plt.savefig(f"{filename}.svg") plt.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_models():\n model_files_cvd = np.sort(glob.glob(\"./grad_results/cvd*N1024_f0003.npy\"))\n model_files_mnist = np.sort(glob.glob(\"./grad_results/mnist*N25000_f02.npy\"))\n\n model_files_cvd = np.array([model_files_cvd[2], model_files_cvd[1], model_files_cvd[0]])\n\n results_cvd = []\n results_mnist = []\n\n for filename in model_files_cvd:\n results_cvd.append(np.load(filename))\n \n for filename in model_files_mnist:\n results_mnist.append(np.load(filename))\n\n return np.array(results_mnist), np.array(results_cvd)", "def model_data():\n x_train, y_train, x_val, y_val, x_test, y_test = read_data(\"src/tests/dataclassificationmodel/ferPlus_processed.pbz2\", False)\n return x_train, y_train, x_val, y_val, x_test, y_test", "def readin():\r\n nodes = np.loadtxt('Vnodes.txt', ndmin=2)\r\n mats = np.loadtxt('Vmater.txt', ndmin=2)\r\n elements = np.loadtxt('Veles.txt', ndmin=2)\r\n loads = np.loadtxt('Vloads.txt', ndmin=2)\r\n return nodes, mats, elements, loads", "def read_dataset_v1():\n path = load_config()\n T = feather.read_dataframe(path['data_dir'] / 'T_dat.feather')\n E = feather.read_dataframe(path['data_dir'] / 'E_dat.feather')\n M = feather.read_dataframe(path['data_dir'] / 'Meta.feather')\n data = sio.loadmat(path['data_dir'] / 'highvar_genes.mat', squeeze_me=True)\n return T, E, M, data", "def read_dataset_v2():\n path = load_config()\n T = feather.read_dataframe(path['data_dir'] / 'T_dat_v2.feather')\n E = feather.read_dataframe(path['data_dir'] / 'E_dat_v2.feather')\n M = feather.read_dataframe(path['data_dir'] / 'Meta_v2.feather')\n data = sio.loadmat(path['data_dir'] / 'highvar_genes_v2.mat', squeeze_me=True)\n return T, E, M, data", "def load_vae_full(path, nb_of_bands, folder=False): \n latent_dim = 32\n \n # Build the encoder and decoder\n encoder, decoder = model.vae_model(latent_dim, nb_of_bands)\n\n # Build the model\n vae_loaded, vae_utils, Dkl = vae_functions.build_vanilla_vae(encoder, decoder, full_cov=False, coeff_KL = 0)\n\n if folder == False: \n vae_loaded.load_weights(path)\n else:\n print(path)\n latest = tf.train.latest_checkpoint(path)\n vae_loaded.load_weights(latest)\n\n return vae_loaded, vae_utils, encoder, decoder, Dkl", "def main(model_path='models/Nakakuki_Cell_2010_ODE'):\n n_file = []\n fitparam_files = os.listdir(model_path.strip('/') + '/fitparam')\n for file in fitparam_files:\n if re.match(r'\\d', file):\n n_file.append(int(file))\n for nth_paramset in n_file:\n os.makedirs(\n model_path.strip('/') \n + '/dat2npy/out/{:d}'.format(nth_paramset), exist_ok=True\n )\n nth_fitparam_files = os.listdir(\n model_path.strip('/') + '/fitparam/{:d}'.format(nth_paramset)\n )\n for dat_file in nth_fitparam_files:\n if 'fit' in dat_file:\n \"\"\"\n - fit_param%d.dat -> fit_param%d.npy\n - best_fitness.dat -> best_fitness.npy\n \"\"\"\n try:\n data = np.loadtxt(\n model_path.strip('/') + '/fitparam/{:d}/{}'.format(\n nth_paramset, dat_file\n ), dtype='float'\n )\n except ValueError:\n pass\n else:\n \"\"\"\n - count_num.dat -> count_num.npy\n - generation.dat -> generation.npy\n \"\"\"\n data = np.loadtxt(\n model_path.strip('/') + '/fitparam/{:d}/{}'.format(\n nth_paramset, dat_file\n ), dtype='int'\n )\n np.save(\n model_path.strip('/') + '/dat2npy/out/{:d}/'.format(nth_paramset)\n + dat_file.replace('.dat', '.npy'), data\n )\n if os.path.isfile(\n './logs/{:d}.log'.format(nth_paramset)):\n shutil.copyfile(\n './logs/{:d}.log'.format(nth_paramset),\n model_path.strip('/') \n + '/dat2npy/out/{:d}/optimization.log'.format(nth_paramset)\n )", "def 
load_data():\n\n dump_path = dump_base + '/micro_poi/mpoi_info/'\n\n assert os.path.exists(dump_path)\n\n dpath = dump_path + 'shortest_path.pickle'\n paths = joblib.load(dpath)\n\n dpath = dump_path + 'path_list.pickle'\n path_list = joblib.load(dpath)\n\n dpath = dump_path + 'gain.pickle'\n gain = joblib.load(dpath)\n\n dpath = dump_path + 'stay.pickle'\n stay_time = joblib.load(dpath)\n\n dpath = dump_path + 'reach.pickle'\n reach_time = joblib.load(dpath)\n\n spath = dump_base + '/micro_poi/model_params.list'\n model_params = np.loadtxt(spath)\n\n return np.array(paths), path_list, gain, stay_time, reach_time, model_params", "def load_back_from_disk(data_dir, istrain=True):\n \"\"\"load back metadata_df\"\"\"\n meta_data = pickle.load(open(os.path.join(data_dir, 'meta.pkl'), 'rb'))\n metadata_rows = meta_data[0]\n max_node = meta_data[1]\n\n \"\"\"itershard by loading from disk\"\"\"\n all_X, all_y, all_size, all_L, all_names, all_node_img = [], [], [], [], [], []\n\n for _, row in enumerate(metadata_rows):\n X = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['X'])))\n L = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['L'])))\n y = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['y'])))\n size = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['size'])))\n names = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['name'])))\n node_img = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['node_img'])))\n\n \"\"\" stack to list\"\"\"\n all_X.append(X)\n all_y.append(y)\n all_L.append(L)\n all_size.append(size)\n all_names.append(names)\n all_node_img.append(node_img)\n\n \"\"\" return a Dataset contains all X, y, w, ids\"\"\"\n all_X = np.squeeze(np.vstack(all_X))\n all_L = np.squeeze(np.vstack(all_L))\n all_y = np.squeeze(np.concatenate(all_y))\n all_size = np.squeeze(np.concatenate(all_size))\n all_names = np.squeeze(np.concatenate(all_names))\n all_node_img = np.squeeze(np.concatenate(all_node_img))\n\n # create output dataset\n dataset = dict()\n if istrain:\n dataset['X'] = all_X[:TRAIN_NUM]\n dataset['y'] = all_y[:TRAIN_NUM]\n dataset['size'] = all_size[:TRAIN_NUM]\n dataset['L'] = all_L[:TRAIN_NUM]\n dataset['name'] = all_names[:TRAIN_NUM]\n dataset['node_img'] = all_node_img[:TRAIN_NUM]\n else:\n dataset['X'] = all_X[:TEST_NUM]\n dataset['y'] = all_y[:TEST_NUM]\n dataset['size'] = all_size[:TEST_NUM]\n dataset['L'] = all_L[:TEST_NUM]\n dataset['name'] = all_names[:TEST_NUM]\n dataset['node_img'] = all_node_img[:TEST_NUM]\n\n return dataset, max_node", "def load_vae_conv(path,nb_of_bands,folder = False): \n latent_dim = 32\n \n # Build the encoder and decoder\n encoder, decoder = model.vae_model(latent_dim, nb_of_bands)\n\n # Build the model\n vae_loaded, vae_utils, Dkl = vae_functions.build_vanilla_vae(encoder, decoder, full_cov=False, coeff_KL = 0)\n\n if folder == False: \n vae_loaded.load_weights(path)\n else:\n latest = tf.train.latest_checkpoint(path)\n vae_loaded.load_weights(latest)\n\n return vae_loaded, vae_utils, encoder, Dkl", "def read_vmdas(self,):\n fd = self.f\n # The raw files produced by VMDAS contain a binary navigation data\n # block.\n self.cfg['sourceprog'] = 'VMDAS'\n ens = self.ensemble\n k = ens.k\n if self._source != 1 and self._debug_level >= 1:\n print(' \\n***** Apparently a VMDAS file \\n\\n')\n self._source = 1\n self.vars_read += ['time_gps',\n 'latitude_gps',\n 'longitude_gps',\n 'etime_gps',\n 'elatitude_gps',\n 'elongitude_gps',\n 'flags',\n 'ntime', ]\n utim = fd.read_ui8(4)\n 
date = tmlib.datetime(utim[2] + utim[3] * 256, utim[1], utim[0])\n # This byte is in hundredths of seconds (10s of milliseconds):\n time = tmlib.timedelta(milliseconds=(int(fd.read_ui32(1) / 10)))\n fd.seek(4, 1) # \"PC clock offset from UTC\" - clock drift in ms?\n ens.time_gps[k] = tmlib.date2epoch(date + time)[0]\n ens.latitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.longitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.etime_gps[k] = tmlib.date2epoch(date + tmlib.timedelta(\n milliseconds=int(fd.read_ui32(1) * 10)))[0]\n ens.elatitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.elongitude_gps[k] = fd.read_i32(1) * self._cfac\n fd.seek(12, 1)\n ens.flags[k] = fd.read_ui16(1)\n fd.seek(6, 1)\n utim = fd.read_ui8(4)\n date = tmlib.datetime(utim[0] + utim[1] * 256, utim[3], utim[2])\n ens.ntime[k] = tmlib.date2epoch(date + tmlib.timedelta(\n milliseconds=int(fd.read_ui32(1) / 10)))[0]\n fd.seek(16, 1)\n self._nbyte = 2 + 76", "def read_model(node_file, mater_file, els_file, load_file, verbose=True):\n # rea\n nodes = np.loadtxt(node_file)\n mats = np.loadtxt(mater_file)\n elements = np.loadtxt(els_file, dtype=int)\n loads = np.loadtxt(load_file)\n \n # Generate echo files\n if verbose:\n np.savetxt(\"KNODES.txt\", nodes, fmt='%5.2f', delimiter=' ')\n np.savetxt(\"KMATES.txt\", mats, fmt='%5.2f', delimiter=' ')\n np.savetxt(\"KELEMS.txt\", elements, fmt='%5.2f', delimiter=' ')\n np.savetxt(\"KLOADS.txt\", loads, fmt='%5.2f', delimiter=' ')\n \n return nodes, mats, elements, loads", "def read_glm_epochs(infile):\n with open(infile, 'rb') as outp:\n glmepec = pickle.load(outp)\n return glmepec", "def parse_BS_data(retrieved_folder, fermi_level, kpoints):\n # conversion factor from Ry to eV\n eVscale = get_Ry2eV()\n\n retrieved_list = retrieved_folder.list_object_names()\n qdos_file_list = [i for i in retrieved_list if 'qdos.' in i]\n q_vec_file = 'qvec.dat'\n\n if q_vec_file in retrieved_list:\n with retrieved_folder.open(q_vec_file) as file_opened:\n q_vec = np.loadtxt(file_opened, skiprows=1)\n\n for icount, fname in enumerate(qdos_file_list):\n with retrieved_folder.open(fname) as _f:\n loaded_file = np.loadtxt(_f)\n if icount == 0:\n total_qdos = loaded_file\n else:\n total_qdos[:, 5:] += loaded_file[:, 5:]\n\n ef = fermi_level.value # in Ry unit\n total_qdos[:, 0] = (total_qdos[:, 0] - ef) * eVscale\n eng_points = set(total_qdos[:, 0])\n eng_points = np.sort(list(eng_points))\n no_eng_points = len(eng_points)\n\n qdos_intensity = np.ndarray(shape=(no_eng_points, len(q_vec)))\n for ne in range(np.shape(qdos_intensity)[0]):\n nk = np.shape(qdos_intensity)[1]\n # sum up all l-channels (5 is only the s-channel!)\n qdos_intensity[ne, :] = np.sum(total_qdos[ne * nk:(ne + 1) * nk, 5:], axis=1) / eVscale\n\n qdos_intensity = qdos_intensity.T # setting eng-kpts corresponds to x-y asix\n q_vec = np.asarray(q_vec) # converting q_vec into array\n eng_points = (np.asarray(eng_points)) # converting eng_popints into array in Ry unit\n\n # To save into the ArrayData\n array = ArrayData()\n array.set_array('BlochSpectralFunction', qdos_intensity)\n array.set_array('Kpts', q_vec)\n array.set_array('energy_points', eng_points)\n if kpoints.labels is not None:\n klbl_dict = dict(kpoints.labels) # Special k-points\n array.extras['k-labels'] = klbl_dict\n\n return {'BS_Data': array}", "def load_data_from_disk(self):\n data = dict()\n Omega_M = self.theta_fid[0]\n der_den = 1. / (2. * self.delta_theta)\n\n print (\"Loading data from disk.. 
Omega_M = \", Omega_M, \"delta_theta = \", self.delta_theta[0])\n\n for key in ['x_central', 'x_m', 'x_p', 'x_central_test', 'x_m_test', 'x_p_test']:\n data[key] = np.load(f'./preloaded_data/{Omega_M}_{self.delta_theta[0]}_{key}.npy')\n\n return data, der_den", "def test_run_read(self):\n\n self.ictrl[0] = 1 + 2 + 4 + 8\n vmec_f90wrap.runvmec(self.ictrl, self.filename, self.verbose, \\\n self.fcomm, reset_file)\n\n self.assertTrue(self.ictrl[1] in success_codes)\n\n self.assertEqual(vmec_f90wrap.vmec_input.nfp, 3)\n self.assertEqual(vmec_f90wrap.vmec_input.mpol, 4)\n self.assertEqual(vmec_f90wrap.vmec_input.ntor, 3)\n print('rbc.shape:', vmec_f90wrap.vmec_input.rbc.shape)\n print('rbc:',vmec_f90wrap.vmec_input.rbc[101:103, 0:4])\n\n # n = 0, m = 0:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.rbc[101,0], 1.3782)\n\n # n = 0, m = 1:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.zbs[101,1], 4.6465E-01)\n\n # n = 1, m = 1:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.zbs[102,1], 1.6516E-01)\n\n # Now try reading in the output\n wout_file = os.path.join(os.path.dirname(__file__), 'wout_li383_low_res.nc')\n ierr = 0\n vmec_f90wrap.read_wout_mod.read_wout_file(wout_file, ierr)\n self.assertEqual(ierr, 0)\n self.assertAlmostEqual(vmec_f90wrap.read_wout_mod.betatot, \\\n 0.0426215030653306, places=4)\n\n print('iotaf.shape:',vmec_f90wrap.read_wout_mod.iotaf.shape)\n print('rmnc.shape:',vmec_f90wrap.read_wout_mod.rmnc.shape)\n\n self.assertAlmostEqual(vmec_f90wrap.read_wout_mod.iotaf[-1], \\\n 0.654868168783638, places=4)\n\n self.assertAlmostEqual(vmec_f90wrap.read_wout_mod.rmnc[0, 0], \\\n 1.4773028173065, places=4)", "def read_model(filename):\n return joblib.load(filename)", "def load_data(self):\n x_vector = pickle.load(open(self.file_stem + \"x.pickle\", \"rb\"))\n ode_sols = pickle.load(open(self.file_stem + \"sols.pickle\", \"rb\"))\n forcings = pickle.load(open(self.file_stem + \"fs.pickle\", \"rb\"))\n sl_coeffs = pickle.load(open(self.file_stem + \"coeffs.pickle\", \"rb\"))\n\n return x_vector, ode_sols, forcings, sl_coeffs", "def load_model_file(device_index):\n print(\"\\nStart loading model...\")\n\n return kdp_wrapper.isi_load_nef(device_index, MODEL_FILE, ISI_APP_ID)", "def test_readfile(self):\n fname = os.path.join(self.datadir, 'monol_testA_E3-50_rebin4_gti') + \\\n HEN_FILE_EXTENSION\n command = \"{0}\".format(fname)\n\n hen.io.main(command.split())", "def load_NMF_model():\n model = pickle.load(open(\"models/nmf_model.sav\", 'rb'))\n Q = model.components_ \n return model, Q", "def read_qmcpack_dense(filename):\n with h5py.File(filename, 'r') as fh5:\n enuc = fh5['Hamiltonian/Energies'][:][0]\n dims = fh5['Hamiltonian/dims'][:]\n hcore = fh5['Hamiltonian/hcore'][:]\n chol = fh5['Hamiltonian/DenseFactorized/L'][:]\n\n return hcore, chol, enuc", "def load_data():\n\n training_files_dir = \"digits/trainingDigits\"\n training_files = os.listdir(training_files_dir)\n file_num = len(training_files)\n hw_labels = []\n\n training_mat = zeros((file_num, 32 * 32))\n for i in xrange(file_num):\n filename = training_files[i]\n file_label = int((filename.split(\".\")[0]).split(\"_\")[0])\n hw_labels.append(file_label)\n training_mat[i, :] = img2vector(training_files_dir + '/' + filename)\n\n return training_mat, hw_labels", "def _read(self):\n # initializng data dictionary\n self.data={}\n\n f = FortranFile(self.filename)\n # Default omnivor binary header\n self.data['MK'] = f.readInts('i')\n self.data['itime'] = f.readInts('i')\n self.data['version'] = f.readString()\n 
self.data['file_id'] = f.readInts('i')\n self.data['sversion'] = f.readString()\n # Velocity field\n self.data['stype'] = f.readString()\n self.data['is_grid'] = f.readInts('i')\n nCPs = f.readInts('i')\n self.data['nCPs'] = nCPs\n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n #print('File is a velocity grid file')\n n1 = f.readInts('i')\n n2 = f.readInts('i')\n n3 = f.readInts('i')\n self.data['n1'] = n1\n self.data['n2'] = n2\n self.data['n3'] = n3\n self.data['is_straight'] = f.readInts('i')\n self.data['v1'] = f.readReals(real_char)\n self.data['v2'] = f.readReals(real_char)\n self.data['v3'] = f.readReals(real_char)\n\n CPs_raw = f.readReals(real_char)\n Utot_raw = f.readReals(real_char)\n CPs = np.reshape(CPs_raw,(3,nCPs),order = 'F')\n Utot = np.reshape(Utot_raw,(3,nCPs),order = 'F')\n\n acc=-1\n CPsTab = np.zeros((3, n1,n2,n3))\n UtotTab = np.zeros((3, n1,n2,n3))\n # Reshaping the nasty way (this is natural order). \n for i in range(0,n1):\n for j in range(0,n2):\n for k in range(0,n3):\n acc=acc+1\n CPsTab[0:3,i,j,k] = CPs[0:3,acc]\n UtotTab[0:3,i,j,k] = Utot[0:3,acc]\n\n self.data['CPs'] = CPs\n self.data['CPsTab'] = CPsTab\n self.data['Utot'] = Utot\n self.data['UtotTab'] = UtotTab", "def _read_csvs(self):\n self.data = pd.read_csv(self.path+self.name, index_col=0)", "def load_epsvec( fname ):\n try:\n E = numpy.loadtxt( fname )\n except IOError:\n prefix = '/data/jberwald/neurons/epsilons/'\n E = numpy.loadtxt( prefix + fname )\n return E", "def test_read_input(self):\n self.ictrl[0] = run_modes['input']\n vmec_f90wrap.runvmec(self.ictrl, self.filename, self.verbose, \\\n self.fcomm, reset_file)\n\n self.assertTrue(self.ictrl[1] in success_codes)\n\n self.assertEqual(vmec_f90wrap.vmec_input.nfp, 3)\n self.assertEqual(vmec_f90wrap.vmec_input.mpol, 4)\n self.assertEqual(vmec_f90wrap.vmec_input.ntor, 3)\n print('rbc.shape:', vmec_f90wrap.vmec_input.rbc.shape)\n print('rbc:',vmec_f90wrap.vmec_input.rbc[101:103, 0:4])\n\n # n = 0, m = 0:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.rbc[101,0], 1.3782)\n\n # n = 0, m = 1:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.zbs[101,1], 4.6465E-01)\n\n # n = 1, m = 1:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.zbs[102,1], 1.6516E-01)", "def get_examples(data_dir, mode, task_id, shard_id):\n file_path = get_full_filename(data_dir, mode, task_id, shard_id)\n relative_path = \"/\".join(file_path.split(\"/\")[3:])\n tf.logging.info(\"Reading file: %s\" % (file_path))\n print(relative_path)\n #client = storage.Client(projectname, credentials=credentials)\n #bucket = client.get_bucket(bucket_name)\n blob = storage_bucket.blob(relative_path)\n if not blob.exists():\n tf.logging.info(\"Path doesn't exist\")\n return None\n nq_data = extract_nq_data(file_path)\n tf.logging.info(\"NQ data Size: \" + str(len(nq_data.keys())))\n\n tf.logging.info(\"Performing entity extraction\")\n fact_extracted_data = entity_link_nq(nq_data)\n return fact_extracted_data", "def read_data():\n ADV_MAT = np.load('ADV.npy');\n ADJ_MAT = np.load('ADJ.npy');\n PR_MAT = np.load('PR.npy'); \n NN_MAT = np.load('NN.npy');\n for i in range(ADV_MAT.shape[0]):RUNNING_DATA['ADV___'+str(i)] = ADV_MAT[i];\n for i in range(ADJ_MAT.shape[0]):RUNNING_DATA['ADJ___'+str(i)] = ADJ_MAT[i];\n for i in range(PR_MAT.shape[0]):RUNNING_DATA['PR___'+str(i)] = PR_MAT[i];\n for i in range(NN_MAT.shape[0]):RUNNING_DATA['NN___'+str(i)] = NN_MAT[i];", "def load_model(file_index):\n normal, abnormal, all = read_in(file_index, 1, 2, 0.3)\n 
autoencoder = keras.models.load_model('Working_Data/ae_patient_' + str(file_index) + '_dim' + str(100) + '_model.h5')\n reconstructed = autoencoder.predict(all)\n reconstruction_save = \"Working_Data/reconstructed_cdae_10d_Idx\" + str(file_index) + \".npy\"\n np.save(reconstruction_save, reconstructed)", "def read_mesa(self, filename=None):\n\n if filename is None:\n filename = self.model_file\n\n with open(filename, 'r') as f:\n # count number of lines\n npts_file = sum([1 for line in f])\n\n # go back to start and read first line in file to get number of parameters\n f.seek(0)\n l = f.readline()\n nparams_file = int(l.split(' ')[-1])\n\n # skip lines 2-4\n for i in range(3):\n f.readline()\n\n # the fifth line will give us the number of variables\n l = f.readline()\n nvars_file = int(l.split(' ')[-1])\n\n # subtract header rows\n npts_file -= 6\n\n print(f'{nvars_file} variables found in the initial model file')\n print(f'{npts_file} points found in the initial model file')\n\n var_idx_map = {}\n logR_idx = -1\n\n # read in the names of the variables\n for i in range(nvars_file):\n var_name_file = f.readline().strip()\n if var_name_file.lower() == 'n':\n var_name_file = 'neut'\n elif var_name_file == 'p':\n var_name_file = 'prot'\n\n if var_name_file == 'logR':\n logR_idx = i\n continue\n\n # create map of file indices to model indices\n try:\n var_idx_map[self.idx[var_name_file]] = i\n except KeyError:\n var_idx_map[self.idx['spec'] - 1 + network_module.network_species_index(var_name_file.lower())] = i\n\n base_r = np.zeros(npts_file)\n base_state = np.zeros((npts_file, self.nvar))\n\n # read in model data\n for i, line in enumerate(f):\n variables = [float(v) for v in line.split(' ')]\n\n # need to reverse the inputs file here\n\n n = npts_file - i - 1\n\n base_r[n] = R_solar * 10**variables[logR_idx]\n\n for j in range(self.nvar):\n if j in var_idx_map:\n base_state[n, j] = variables[var_idx_map[j]]\n\n return npts_file, base_r, base_state", "def _fetch_large():\n # Large training data:\n resource(\n target=data_path(\"eeg\", \"SMNI_CMI_TRAIN.tar.gz\"),\n url=\"https://kdd.ics.uci.edu/databases/eeg/SMNI_CMI_TRAIN.tar.gz\",\n )\n dependency(\n target=data_path(\"eeg\", \"train\"),\n source=data_path(\"eeg\", \"SMNI_CMI_TRAIN.tar.gz\"),\n commands=[\n \"tar xzf SMNI_CMI_TRAIN.tar.gz\",\n \"mv SMNI_CMI_TRAIN train\",\n \"find train | grep gz$ | xargs gunzip\",\n ],\n )\n # Large test data:\n resource(\n target=data_path(\"eeg\", \"SMNI_CMI_TEST.tar.gz\"),\n url=\"https://kdd.ics.uci.edu/databases/eeg/SMNI_CMI_TEST.tar.gz\",\n )\n dependency(\n target=data_path(\"eeg\", \"test\"),\n source=data_path(\"eeg\", \"SMNI_CMI_TEST.tar.gz\"),\n commands=[\n \"tar xzf SMNI_CMI_TEST.tar.gz\",\n \"mv SMNI_CMI_TEST test\",\n \"find test | grep gz$ | xargs gunzip\",\n ],\n )", "def read_data(self):\n data = np.genfromtxt(self.__file) # Planck SED\n self.__nu = 10.0**data[:,0]\n self.__nuF = 10.0**data[:,2]\n self.__err = 10.0**data[:,3]\n #self.__W = 10.0**data[:,4]\n self.__yerr = [ self.__nuF - self.__nuF / self.__err, \\\n self.__nuF * self.__err - self.__nuF ]\n self.__maxY = max( self.__nuF )\n self.__minY = min( self.__nuF )", "def read_data(self):\n self.data = reduce_spectrum(self.filename)", "def load_vecs():\n global VECTORIZER\n global CECTORIZER\n\n v_file = os.path.join(TMP_DIR, 'vectorizer.pickle')\n d_file = os.path.join(TMP_DIR, 'dectorizer.pickle')\n f_file = os.path.join(TMP_DIR, 'freq.pickle')\n\n if os.path.isfile(v_file) and os.path.isfile(d_file):\n with open(v_file, 'rb') as 
f:\n VECTORIZER = pickle.load(f)\n with open(d_file, 'rb') as f:\n CECTORIZER = pickle.load(f)\n return True\n\n return False", "def read_data(data_path):\n tr = data_path + 'train_vectors.txt'\n v = data_path + 'val_vectors.txt'\n tst = data_path + 'test_vectors.txt'\n return tr, v, tst", "def loader(filename,wdm=0,verbose=0,kmpers=1):\n with open(filename, 'rb') as f:\n if wdm == False:\n if verbose>1:\n print(filename)\n #file info\n info= np.fromfile(f,dtype=infodtype,count=1)\n infoBytes = f.tell()\n if verbose>2:\n print(infoBytes)\n #skip darkmatter\n #read the first dm line\n if verbose>2:\n print(f.tell())\n catd = np.fromfile(f,dtype= dmdtype, count=1) \n #get the bytes location and subtract off the bytes location after loading info to get n bytes a line for dm\n if verbose>2:\n print(f.tell())\n current = f.tell()\n dmBytes = current-infoBytes\n f.seek(dmBytes*(info['nd'][0]-1)+current)\n if verbose>2:\n print(f.tell())\n # stars setup \n cats= np.fromfile(f,dtype=stellardtype, count=info['ns'][0])\n if verbose>2:\n print('done')\n else:\n if verbose>1:\n print(filename)\n #file info\n info= np.fromfile(f,dtype=infodtype,count=1)\n if verbose>2:\n print(f.tell())\n # #dark matter setup count is reading the number of ?rows? \n catd= np.fromfile(f,dmdtype, count=info['nd'][0]) \n if verbose>2:\n print(f.tell()) \n # stars setup \n cats= np.fromfile(f,dtype=stellardtype, count=info['ns'][0])\n if verbose>2:\n print('done')\n \n \n #convert to physical units as found in README.md\n if wdm == True:\n catd['mass']*=2.324876e9\n if kmpers == 1:\n catd['vx']*=100.\n catd['vy']*=100.\n catd['vz']*=100.\n cats['mass']*=2.324876e9\n if kmpers == 1:\n cats['vx']*=100.\n cats['vy']*=100.\n cats['vz']*=100.\n \n if wdm == True:\n return(catd,cats,info)\n else:\n return(cats,info)", "def get_data():\r\n if not path_validation(MODEL_PATH, read_access=True):\r\n exit(0) \r\n if not path_validation(TEST_DATA_PATH, read_access=True):\r\n exit(0) \r\n if not path_validation(TEST_LABEL_PATH, read_access=True):\r\n exit(0) \r\n\r\n params = joblib.load(MODEL_PATH)\r\n test_images = np.load(TEST_DATA_PATH)\r\n test_labels = np.load(TEST_LABEL_PATH)\r\n\r\n # Addition of bias in test set\r\n test_images = np.insert(test_images, 0, 1, axis=1)\r\n\r\n return params, test_images, test_labels", "def read_wikibrain_vecs(path):\n matrix = []\n with open(path, \"r\") as vecs:\n vecs.readline()\n for line in vecs:\n matrix.append(map(float, line.rstrip(\"\\n\").split(\"\\t\")))\n return matrix", "def get_memes_data(path):\n data = load_files(path)\n memes = np.array(data['filenames'])\n return memes", "def readmodel(model = 'dominguez'):\n ebl_file_path = os.path.join(os.path.split(__file__)[0],'data/')\n\n if model == 'kneiske':\n file_name = join(ebl_file_path , 'ebl_nuFnu_tanja.dat')\n elif model == 'franceschini':\n file_name = join(ebl_file_path , 'ebl_franceschini.dat')\n elif model == 'dominguez':\n file_name = join(ebl_file_path , 'ebl_dominguez11.out')\n elif model == 'dominguez-upper':\n file_name = join(ebl_file_path , 'ebl_upper_uncertainties_dominguez11.out')\n elif model == 'dominguez-lower':\n file_name = join(ebl_file_path , 'ebl_lower_uncertainties_dominguez11.out')\n elif model == 'inoue':\n file_name = join(ebl_file_path , 'EBL_z_0_baseline.dat')\n #file_name = join(ebl_file_path , 'EBL_proper_baseline.dat')\n elif model == 'inoue-low-pop3':\n file_name = join(ebl_file_path , 'EBL_z_0_low_pop3.dat')\n #file_name = join(ebl_file_path , 'EBL_proper_low_pop3.dat')\n elif model == 
'inoue-up-pop3':\n file_name = join(ebl_file_path , 'EBL_z_0_up_pop3.dat')\n #file_name = join(ebl_file_path , 'EBL_proper_up_pop3.dat')\n elif model == 'gilmore':\n file_name = join(ebl_file_path , 'eblflux_fiducial.dat')\n elif model == 'gilmore-fixed':\n file_name = join(ebl_file_path , 'eblflux_fixed.dat')\n elif model == 'cuba':\n file_name = join(ebl_file_path , 'CUBA_UVB.dat')\n elif model == 'finke':\n file_name = join(ebl_file_path , 'ebl_modelC_Finke.txt')\n else:\n raise ValueError(\"Unknown EBL model chosen!\")\n\n data = np.loadtxt(file_name)\n if model.find('inoue') >= 0:\n z = np.array([0.])\n #z = data[0,1:]\n #nuInu = data[:,1]\n lmu = data[:,0]\n nuInu = np.array([data[:,1]]).T\n raise ValueError('Inoue models not correctly implemented at the moment, choose another model')\n\n elif model.find('gilmore') >= 0:\n z = data[0,1:]\n lmu = data[1:,0] * 1e-4 # convert from Angstrom to micro meter\n nuInu = data[1:,1:] \n nuInu[nuInu == 0.] = 1e-20 * np.ones(np.sum(nuInu == 0.))\n \n # convert from ergs/s/cm^2/Ang/sr to nW/m^2/sr\n nuInu = (nuInu.T * data[1:,0]).T * 1e4 * 1e-7 * 1e9 \n\n elif model == 'cuba':\n z = data[0,1:-1]\n lmu = data[1:,0] * 1e-4\n nuInu = data[1:,1:-1]\n\n # replace zeros by 1e-40\n idx = np.where(data[1:,1:-1] == 0.)\n nuInu[idx] = np.ones(np.sum(nuInu == 0.)) * 1e-20\n\n # in erg / cm^2 / s / sr\n nuInu = (nuInu.T * c.c.value / (lmu * 1e-6)).T \n nuInu *= 1e6 # in nW / m^2 / sr\n\n # check where lmu is not strictly increasing\n idx = np.where(np.diff(lmu) == 0.)\n for i in idx[0]:\n lmu[i+1] = (lmu[i + 2] + lmu[i]) / 2.\n\n else:\n z = data[0,1:]\n lmu = data[1:,0]\n nuInu = data[1:,1:]\n if model == 'finke': \n lmu = lmu[::-1] * 1e-4\n nuInu = nuInu[::-1]\n\n return EBL(z,lmu,nuInu, model = model)", "def load_data_from_fold(data_path):\r\n print(\"\\nLoading data from json folder {}\".format(data_path))\r\n\r\n SAMPLES_TO_CONSIDER = 22050\r\n\r\n data = preprocess_dataset(data_path, SAMPLES_TO_CONSIDER)\r\n\r\n X = np.array(data[\"MFCCs\"])\r\n y = np.array(data[\"labels\"])\r\n print(\"Training sets loaded!\")\r\n print(\"data size :\", X.shape, \"labels size: \", y.shape)\r\n print(\"release the 'data' for memories\")\r\n del data\r\n\r\n return X, y", "def read_files(data_loc, vector_size, window, epochs, min_df, max_df):\n\n class Data: pass\n\n speech = Data()\n\n print(\"-- train data\")\n speech.train_data, speech.train_fnames, speech.train_labels = read_tsv(data_loc, \"train.tsv\")\n print(len(speech.train_data))\n\n print(\"-- dev data\")\n speech.dev_data, speech.dev_fnames, speech.dev_labels = read_tsv(data_loc, \"dev.tsv\")\n print(len(speech.dev_data))\n\n print(\"-- test data\")\n test_data, test_fnames = read_unlabeled(data_loc, 'test')\n\n # print(\"-- unlabeled data\")\n # unlabeled_data, unlabeled_fnames = read_unlabeled(data_loc, 'unlabeled')\n # print(len(unlabeled_fnames))\n\n print(\"-- transforming data and labels\")\n speech.test_fnames = test_fnames\n\n train_docs = []\n train_analyzedDocument = namedtuple('AnalyzedDocument', 'words tags')\n for i, document in enumerate(speech.train_data):\n words = document\n tags = [\"train_\" + str(i)]\n train_docs.append(train_analyzedDocument(words, tags))\n dev_docs = []\n dev_analyzedDocument = namedtuple('AnalyzedDocument', 'words tags')\n for i, document in enumerate(speech.dev_data):\n words = document\n tags = [\"dev_\" + str(i)]\n dev_docs.append(dev_analyzedDocument(words, tags))\n test_docs = []\n test_analyzedDocument = namedtuple('AnalyzedDocument', 'words tags')\n for i, 
document in enumerate(test_data):\n words = document\n tags = [\"test_\" + str(i)]\n test_docs.append(test_analyzedDocument(words, tags))\n docs = train_docs + dev_docs + test_docs\n model = Doc2Vec(vector_size=vector_size, window=window, min_count=1, workers=8, epochs=epochs, negative=5, sample=1e-4)\n model.build_vocab(docs)\n model.train(docs, total_examples=model.corpus_count, epochs=model.epochs)\n print(model.most_similar('북한'))\n speech.train_doc_vec = np.zeros((4120, vector_size))\n speech.dev_doc_vec = np.zeros((4120, vector_size))\n speech.test_doc_vec = np.zeros((4120, vector_size))\n\n for i in range(4120):\n prefix_train = 'train_' + str(i)\n speech.train_doc_vec[i] = model[prefix_train]\n for i in range(4120):\n prefix_train = 'dev_' + str(i)\n speech.dev_doc_vec[i] = model[prefix_train]\n for i in range(4120):\n prefix_test = 'test_' + str(i)\n speech.test_doc_vec[i] = model[prefix_test]\n from sklearn import preprocessing\n speech.le = preprocessing.LabelEncoder()\n speech.le.fit(speech.train_labels)\n speech.target_labels = speech.le.classes_\n speech.trainy = speech.le.transform(speech.train_labels)\n speech.devy = speech.le.transform(speech.dev_labels)\n return speech", "def read_szf_fmv_13(eps_file):\n data = {}\n metadata = {}\n\n n_lines = eps_file.mdr_counter\n n_node_per_line = eps_file.mdr[\"LONGITUDE_FULL\"].shape[1]\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n # extract metadata\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n metadata[\"state_vector_time\"] = datetime.strptime(\n eps_file.mphr[\"STATE_VECTOR_TIME\"][:-4], \"%Y%m%d%H%M%S\")\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n for f in fields:\n metadata[f] = np.int16(eps_file.mphr[f.upper()])\n\n # extract time\n dt = np.datetime64(\n \"2000-01-01\") + eps_file.mdr[\"UTC_LOCALISATION\"][\"day\"].astype(\n \"timedelta64[D]\"\n ) + eps_file.mdr[\"UTC_LOCALISATION\"][\"time\"].astype(\"timedelta64[ms]\")\n data[\"time\"] = dt[idx_nodes]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\", \"flagfield_rf1\", \"flagfield_rf2\", \"flagfield_pl\",\n \"flagfield_gen1\"\n ]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\"\n ]\n\n # 101 min = 6082 seconds\n # state_vector_time = ascending node crossing time - 1520.5,\n # time crossing at -90 lat\n orbit_start_time = metadata[\"state_vector_time\"] - timedelta(\n seconds=1520.5)\n orbit_end_time = orbit_start_time + timedelta(seconds=6082)\n\n data[\"orbit_nr\"] = np.ma.zeros(\n data[\"time\"].size, dtype=np.int32,\n fill_value=int32_nan) + metadata[\"orbit_start\"]\n data[\"orbit_nr\"][data[\"time\"] > orbit_end_time] += 1\n\n metadata[\"orbits\"] = {}\n for orbit_nr in np.unique(data[\"orbit_nr\"]):\n if orbit_nr == metadata[\"orbit_start\"]:\n metadata[\"orbits\"][orbit_nr] = (orbit_start_time, orbit_end_time)\n else:\n metadata[\"orbits\"][orbit_nr] = (orbit_end_time, orbit_end_time +\n timedelta(seconds=6082))\n\n # extract data\n for f in fields:\n if eps_file.mdr_sfactor[f.upper()] == 1:\n data[f] = eps_file.mdr[f.upper()].flatten()[idx_nodes]\n else:\n data[f] = (eps_file.mdr[f.upper()].flatten() * 1. 
/\n eps_file.mdr_sfactor[f.upper()])[idx_nodes]\n\n data[\"swath_indicator\"] = (data[\"beam_number\"].flatten() > 3).astype(\n np.uint8)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n fields = [(\"longitude_full\", long_nan), (\"latitude_full\", long_nan),\n (\"sigma0_full\", long_nan), (\"inc_angle_full\", uint_nan),\n (\"azi_angle_full\", int_nan), (\"flagfield\", int_nan)]\n\n for f, nan_val in fields:\n data[f] = eps_file.mdr[f.upper()].flatten()\n invalid = eps_file.mdr[f.upper()].flatten() == nan_val\n\n if eps_file.mdr_sfactor[f.upper()] != 1:\n data[f] = data[f] * 1. / eps_file.mdr_sfactor[f.upper()]\n\n data[f][invalid] = nan_val\n\n # modify longitudes from (0, 360) to (-180, 180)\n mask = np.logical_and(data[\"longitude_full\"] != long_nan,\n data[\"longitude_full\"] > 180)\n data[\"longitude_full\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n idx = (data[\"azi_angle_full\"] != int_nan) & (data[\"azi_angle_full\"] < 0)\n data[\"azi_angle_full\"][idx] += 360\n\n # set flags\n data[\"f_usable\"] = set_flags_fmv13(data[\"flagfield\"])\n\n return data, metadata", "def read_PSSM_data(self):\n\n names = os.listdir(self.pssm_path)\n fname = [n for n in names if n.find(self.molname)==0]\n\n if len(fname)>1:\n raise ValueError('Multiple PSSM files found for %s in %s',self.mol_name,self.pssm_path)\n if len(fname)==0:\n raise FileNotFoundError('No PSSM file found for %s in %s',self.mol_name,self.pssm_path)\n else:\n fname = fname[0]\n\n f = open(self.pssm_path + '/' + fname,'rb')\n data = f.readlines()\n f.close()\n raw_data = list( map(lambda x: x.decode('utf-8').split(),data))\n\n self.res_data = np.array(raw_data)[:,:3]\n self.res_data = [ (r[0],int(r[1]),r[2]) for r in self.res_data ]\n self.pssm_data = np.array(raw_data)[:,3:].astype(np.float)", "def read_data(path, batch_size, qp, frac, kernel, model):\n # load h5 file and get dictionaries\n inputs_dict, labels_dict, _ = get_dataset_dict(path, qp)\n\n # create training / validation dictionaries\n block_keys = [k for k in inputs_dict]\n train_inputs_dict, train_labels_dict, val_inputs_dict, val_labels_dict = (dict() for _ in range(4))\n\n # get inputs / labels for block & frac position\n for block in block_keys:\n inputs = inputs_dict[block][frac]\n\n # only use inputs that can be split 80 / 20 train / validation and fill out a batch\n split_percentage = 4/5\n if len(inputs) < batch_size / split_percentage:\n continue\n\n # if model contains non-linear activations, use same input & label size\n inputs = inputs[:, kernel:-kernel, kernel:-kernel, :] if \"scratch\" not in model else inputs\n\n labels = labels_dict[block][frac]\n\n # shuffle the pairs\n inputs, labels = array_shuffle(len(inputs), inputs, labels)\n\n # split 80 / 20\n (train_inputs, train_labels), (val_inputs, val_labels) = split_data(split_percentage, inputs, labels)\n\n # put into correct dictionary entry\n train_inputs_dict[block] = train_inputs\n train_labels_dict[block] = train_labels\n val_inputs_dict[block] = val_inputs\n val_labels_dict[block] = val_labels\n\n return train_inputs_dict, train_labels_dict, val_inputs_dict, val_labels_dict", "def read_data_set():\n # shapes of datasets -- [] means expanded form:\n # - X: J\n # - net.R: J [x J x 1]\n # - F_DIST: J x J x num_features\n # - F_DIST_w1: J x J x num_features\n # - w['except_first'][-1]: (last weights) J x num_features [x 1]\n # - w['except_first'][1:-1]: (second to last weights) J x J x num_features\n # - first weights **were** also J x J x 
num_features\n # - w['first_for_r']: J x 1 x num_features\n\n read_X()\n read_weights(read_FDIST())", "def test_brainvision_data():\n assert_raises(IOError, read_raw_brainvision, vmrk_path)\n assert_raises(ValueError, read_raw_brainvision, vhdr_path, montage,\n preload=True, scale=\"foo\")\n with warnings.catch_warnings(record=True) as w: # event parsing\n raw_py = _test_raw_reader(\n read_raw_brainvision, vhdr_fname=vhdr_path, montage=montage,\n eog=eog)\n assert_true(all('parse triggers that' in str(ww.message) for ww in w))\n assert_true('RawBrainVision' in repr(raw_py))\n\n assert_equal(raw_py.info['highpass'], 0.)\n assert_equal(raw_py.info['lowpass'], 250.)\n\n picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')\n data_py, times_py = raw_py[picks]\n\n # compare with a file that was generated using MNE-C\n raw_bin = Raw(eeg_bin, preload=True)\n picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')\n data_bin, times_bin = raw_bin[picks]\n\n assert_array_almost_equal(data_py, data_bin)\n assert_array_almost_equal(times_py, times_bin)\n\n # Make sure EOG channels are marked correctly\n for ch in raw_py.info['chs']:\n if ch['ch_name'] in eog:\n assert_equal(ch['kind'], FIFF.FIFFV_EOG_CH)\n elif ch['ch_name'] == 'STI 014':\n assert_equal(ch['kind'], FIFF.FIFFV_STIM_CH)\n elif ch['ch_name'] in raw_py.info['ch_names']:\n assert_equal(ch['kind'], FIFF.FIFFV_EEG_CH)\n else:\n raise RuntimeError(\"Unknown Channel: %s\" % ch['ch_name'])\n\n # test loading v2\n read_raw_brainvision(vhdr_v2_path, eog=eog, preload=True,\n response_trig_shift=1000)", "def load_data(m=5000, n=100, path='D:/file/vscode/py/data/mnist.npz'):\r\n f = np.load(path)\r\n x_train, y_train = f['x_train'], f['y_train']\r\n\r\n x_test, y_test = f['x_test'], f['y_test']\r\n\r\n f.close()\r\n return (x_train, y_train), (x_test, y_test)", "def readDataFromFile():\n image_size = 28 # each image is 28x28\n\n num_images = 60000 # there are 60k images\n with gzip.open(r'train-images-idx3-ubyte.gz', 'r') as f: # 60k train & valid\n f.read(16) # reading by 16-byte double\n buffer_Train_Images = f.read(image_size * image_size * num_images)\n f.close()\n data_Train_Images = np.frombuffer(buffer_Train_Images, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n data_Train_Images = data_Train_Images.reshape(num_images,\n image_size * image_size) # Data = 60k x 28 x 28 with 1 value in it\n\n with gzip.open('train-labels-idx1-ubyte.gz', 'r') as f: # 60k train & valid - labels\n f.read(8) # reading by 16-byte double\n buffer_Train_Labels = f.read(num_images)\n data_Train_Labels = np.frombuffer(buffer_Train_Labels, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n\n num_images = 10000 # there are 10k images\n with gzip.open('t10k-images-idx3-ubyte.gz', 'r') as f: # 10k tests\n f.read(16) # reading by 16-byte double\n buffer_Test_Image = f.read(image_size * image_size * num_images)\n data_Test_Image = np.frombuffer(buffer_Test_Image, dtype=np.uint8).astype(\n np.uint8) # translating into 0 to 255\n data_Test_Image = data_Test_Image.reshape(num_images, image_size * image_size) # Data = 60k x 28 x 28 with\n\n with gzip.open('t10k-labels-idx1-ubyte.gz', 'r') as f: # 10k tests - lbles\n f.read(8) # reading by 16-byte double\n buffer_Test_Label = f.read(num_images)\n data_Test_Labels = np.frombuffer(buffer_Test_Label, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n\n return data_Train_Images, data_Train_Labels, data_Test_Image, data_Test_Labels", "def 
readEEGepoch(eegfilename, mainDir):\n # subject = 'ES9007' \n datapath = os.path.join(mainDir)\n os.chdir(datapath)\n \n folders = os.listdir(datapath)\n \n for dir in folders:\n \n os.chdir(os.path.join(datapath, dir))\n file = glob.glob(eegfilename)\n \n if file:\n print('>>>>>>>>>>>>> file loaded from >>>>>>>>>>>>>>>>>:', os.getcwd())\n filepath = os.path.join(os.getcwd(), eegfilename) \n dat = mne.read_epochs(filepath, preload=True) \n break \n return dat", "def _read_vee(filename):\n with open(filename) as f:\n lines = f.readlines()\n for line in lines:\n line = line.strip().split()\n if len(line) <= 2:\n size1, size2 = int(line[0]), int(line[1])\n vee = NP.zeros((size1, size1, size2, size2), dtype=NP.float64)\n elif len(line) == 5:\n mu, nu, lmda, sgma, val = int(line[0]) - 1, int(line[1]) - 1, int(line[2]) - 1, int(line[3]) - 1, NP.float64(line[4])\n vee[mu,nu,lmda,sgma] = \\\n vee[nu,mu,lmda,sgma] = \\\n vee[mu,nu,sgma,lmda] = \\\n vee[nu,mu,sgma,lmda] = \\\n vee[lmda,sgma,mu,nu] = \\\n vee[sgma,lmda,mu,nu] = \\\n vee[lmda,sgma,nu,mu] = \\\n vee[sgma,lmda,nu,mu] = \\\n val\n return vee", "def read_model(input_file):\n with open(input_file) as inp:\n labels = inp.readline().strip().split(\" \")\n init_conc = np.array(list(map(float, inp.readline().strip().split(\" \"))))\n\n stoich = []\n for i in range(len(labels)):\n stoich.append(list(map(float, inp.readline().strip().split(\" \"))))\n S_matrix = np.array(stoich)\n\n educt = []\n for i in range(len(labels)):\n educt.append(list(map(float, inp.readline().strip().split(\" \"))))\n educt_matrix = np.array(educt)\n\n kin_par = np.array(list(map(float, inp.readline().strip().split(\" \"))))\n t_T, t_eval_step = list(map(float, inp.readline().strip().split(\" \")))\n\n return labels, init_conc, S_matrix, educt_matrix, kin_par, t_T, t_eval_step", "def read_szf_fmv_12(eps_file):\n data = {}\n metadata = {}\n\n n_lines = eps_file.mdr_counter\n n_node_per_line = eps_file.mdr[\"LONGITUDE_FULL\"].shape[1]\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n # extract metadata\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n metadata[\"state_vector_time\"] = datetime.strptime(\n eps_file.mphr[\"STATE_VECTOR_TIME\"][:-4], \"%Y%m%d%H%M%S\")\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n for f in fields:\n metadata[f] = np.int16(eps_file.mphr[f.upper()])\n\n # extract time\n dt = np.datetime64(\n \"2000-01-01\") + eps_file.mdr[\"UTC_LOCALISATION\"][\"day\"].astype(\n \"timedelta64[D]\"\n ) + eps_file.mdr[\"UTC_LOCALISATION\"][\"time\"].astype(\"timedelta64[ms]\")\n data[\"time\"] = dt[idx_nodes]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\", \"flagfield_rf1\", \"flagfield_rf2\", \"flagfield_pl\",\n \"flagfield_gen1\"\n ]\n\n # 101 min = 6082 seconds\n # state_vector_time = ascending node crossing time - 1520.5,\n # time crossing at -90 lat\n orbit_start_time = metadata[\"state_vector_time\"] - timedelta(\n seconds=1520.5)\n orbit_end_time = orbit_start_time + timedelta(seconds=6082)\n\n data[\"orbit_nr\"] = np.ma.zeros(\n data[\"time\"].size, dtype=np.int32,\n fill_value=int32_nan) + metadata[\"orbit_start\"]\n data[\"orbit_nr\"][data[\"time\"] > orbit_end_time] += 1\n\n metadata[\"orbits\"] = {}\n for orbit_nr in np.unique(data[\"orbit_nr\"]):\n if orbit_nr == metadata[\"orbit_start\"]:\n 
metadata[\"orbits\"][orbit_nr] = (orbit_start_time, orbit_end_time)\n else:\n metadata[\"orbits\"][orbit_nr] = (orbit_end_time, orbit_end_time +\n timedelta(seconds=6082))\n\n # extract data\n for f in fields:\n if eps_file.mdr_sfactor[f.upper()] == 1:\n data[f] = eps_file.mdr[f.upper()].flatten()[idx_nodes]\n else:\n data[f] = (eps_file.mdr[f.upper()].flatten() * 1. /\n eps_file.mdr_sfactor[f.upper()])[idx_nodes]\n\n data[\"swath_indicator\"] = (data[\"beam_number\"].flatten() > 3).astype(\n np.uint8)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n fields = [(\"longitude_full\", long_nan), (\"latitude_full\", long_nan),\n (\"sigma0_full\", long_nan), (\"inc_angle_full\", uint_nan),\n (\"azi_angle_full\", int_nan), (\"land_frac\", uint_nan),\n (\"flagfield_gen2\", byte_nan)]\n\n for f, nan_val in fields:\n data[f] = eps_file.mdr[f.upper()].flatten()\n invalid = eps_file.mdr[f.upper()].flatten() == nan_val\n\n if eps_file.mdr_sfactor[f.upper()] != 1:\n data[f] = data[f] * 1. / eps_file.mdr_sfactor[f.upper()]\n\n data[f][invalid] = nan_val\n\n # modify longitudes from (0, 360) to (-180, 180)\n mask = np.logical_and(data[\"longitude_full\"] != long_nan,\n data[\"longitude_full\"] > 180)\n data[\"longitude_full\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n idx = (data[\"azi_angle_full\"] != int_nan) & (data[\"azi_angle_full\"] < 0)\n data[\"azi_angle_full\"][idx] += 360\n\n # set flags\n data[\"f_usable\"] = set_flags(data)\n\n return data, metadata", "def read(self) :\n # Open the file.\n f = open(self.output, 'r')\n lines = f.readlines()\n \n # Find the eigenvalue.\n count = 0\n while True :\n words = lines[count].split()\n if len(words) == 5 :\n if words[0] == \"*\" and words[1] == \"K-EFF\":\n self.keff = float(words[3])\n break\n count += 1\n \n # Find the peaking.\n a = 0 # Assembly index\n \n while True :\n words = lines[count].split()\n if len(words) == 8 :\n if words[0] == \"NODE\" and words[1] == \"AVERAGE\" and words[2] == \"POWERS\" :\n count += 5 # Powers start 5 lines below title\n for row in range(0, self.dimension) :\n words = lines[count].split()\n assert(len(words) >= self.dimension)\n for col in range(0, self.dimension) :\n self.peaking_map[row, col] = float(words[col+1])\n if self.core.stencil[row, col] > 0:\n #print \" a=\", a, \" row=\", row, \" col=\", col, len(self.peaking)\n self.peaking[a] = self.peaking_map[row, col]\n a += 1\n count += 1\n break\n count += 1 \n # Maximum peaking.\n self.maxpeak = np.max(self.peaking)", "def load_cleaned_data(self):\n try:\n self.train = pd.read_pickle('../input/train_clean.pkl')\n self.test = pd.read_pickle('../input/test_clean.pkl')\n except FileNotFoundError:\n self.load_raw_data()", "def __init__(self, qmm, config, update_tof, rr_qe, path, meas_len, smearing, lsb=False):\n\n self.qmm = qmm\n self.config = config\n self.rr_qe = rr_qe\n self.num_of_states = 3\n self.path = path\n self.saved_data = None\n self.time_diff = None\n self.update_tof = update_tof\n self.finish_train = 0\n self.mu = dict()\n self.sigma = dict()\n self._load_file(path)\n self.lsb = lsb\n self.meas_len = meas_len\n self.smearing = smearing", "def load_breeze(self, breeze_path):\n self.breeze = pd.read_pickle(os.path.join(self.data_path, 'breeze.pick'))", "def main():\n parser = argparse.ArgumentParser(description=\"Convert a checkpoint file into a support sets and a reconstructor \"\n \"weights files\")\n parser.add_argument('--exp', type=str, required=True, help=\"set experiment's model dir (created by 
`train.py`)\")\n\n # Parse given arguments\n args = parser.parse_args()\n\n # Check structure of `args.exp`\n if not osp.isdir(args.exp):\n raise NotADirectoryError(\"Invalid given directory: {}\".format(args.exp))\n models_dir = osp.join(args.exp, 'models')\n if not osp.isdir(models_dir):\n raise NotADirectoryError(\"Invalid models directory: {}\".format(models_dir))\n checkpoint_file = osp.join(models_dir, 'checkpoint.pt')\n if not osp.isfile(checkpoint_file):\n raise FileNotFoundError(\"Checkpoint file not found: {}\".format(checkpoint_file))\n\n print(\"#. Convert checkpoint file into support sets and reconstructor weight files...\")\n\n # Load checkpoint file\n checkpoint_dict = torch.load(checkpoint_file)\n\n # Get checkpoint iteration\n checkpoint_iter = checkpoint_dict['iter']\n print(\" \\\\__Checkpoint iteration: {}\".format(checkpoint_iter))\n\n # Save support sets weights file\n print(\" \\\\__Save checkpoint support sets weights file...\")\n torch.save(checkpoint_dict['support_sets'], osp.join(models_dir, 'support_sets-{}.pt'.format(checkpoint_iter)))\n\n # Save reconstructor weights file\n print(\" \\\\__Save checkpoint reconstructor weights file...\")\n torch.save(checkpoint_dict['reconstructor'], osp.join(models_dir, 'reconstructor-{}.pt'.format(checkpoint_iter)))", "def load_data():\n\n # Load data\n # You can create this Numpy datafile by running the create_validation_sample.py script\n df = h5py.File(data_fn, \"r\")\n imgs_validation = df[\"imgs_validation\"]\n msks_validation = df[\"msks_validation\"]\n img_indicies = range(len(imgs_validation))\n\n \"\"\"\n OpenVINO uses channels first tensors (NCHW).\n TensorFlow usually does channels last (NHWC).\n So we need to transpose the axes.\n \"\"\"\n input_data = imgs_validation\n msks_data = msks_validation\n return input_data, msks_data, img_indicies", "def load_data(args, path=\"./project_data/\", dataset=\"paper_author.txt\"):\r\n print('Loading {} dataset...'.format(dataset))\r\n\r\n paper_author = make_graph(path+dataset)\r\n print('The number of nodes :', paper_author.number_of_nodes())\r\n adj = nx.adjacency_matrix(paper_author)\r\n\r\n # build symmetric adjacency matrix\r\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\r\n # features = normalize(features)\r\n adj = normalize(adj + sp.eye(adj.shape[0]))\r\n adj = sparse_mx_to_torch_sparse_tensor(adj)\r\n\r\n if args.model == 'adj':\r\n features = adj\r\n\r\n elif args.model == 'node2vec':\r\n print('Already exist Node2vec file')\r\n file_name = './Node2vec_walk_%s_num_walks_%s_truncated.pickle' % (str(args.walk_length), str(args.num_walks))\r\n if os.path.isfile(file_name):\r\n with open(file_name, 'rb') as file:\r\n features = pickle.load(file)\r\n else:\r\n node2vec = Node2Vec(graph=paper_author, # target graph\r\n dimensions=int(args.feature_node), # embedding dimension\r\n walk_length=int(args.walk_length), # number of nodes in each walks\r\n p=2, # return hyper parameter\r\n q=1, # inout parameter, q값을 작게 하면 structural equivalence를 강조하는 형태로 학습됩니다.\r\n weight_key=None, # if weight_key in attrdict\r\n num_walks=int(args.num_walks), \r\n workers=4,\r\n )\r\n features = torch.tensor(node2vec.fit(window=10, min_count=0).wv.vectors)\r\n with open(file_name, 'wb') as file:\r\n pickle.dump(features, file)\r\n return adj, features # , labels, idx_train, idx_val, idx_test\r", "def read_data(path):\n with h5py.File(path, 'r') as hf:\t\n input_ = np.array(hf.get('input'))\n label_ = np.array(hf.get('label'))\n return input_, label_", "def 
read_szx_fmv_11(eps_file):\n raw_data = eps_file.scaled_mdr\n raw_unscaled = eps_file.mdr\n mphr = eps_file.mphr\n\n n_node_per_line = raw_data[\"LONGITUDE\"].shape[1]\n n_lines = raw_data[\"LONGITUDE\"].shape[0]\n n_records = raw_data[\"LONGITUDE\"].size\n\n data = {}\n metadata = {}\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n ascat_time = shortcdstime2jd(raw_data[\"UTC_LINE_NODES\"].flatten()[\"day\"],\n raw_data[\"UTC_LINE_NODES\"].flatten()[\"time\"])\n data[\"jd\"] = ascat_time[idx_nodes]\n\n metadata[\"spacecraft_id\"] = np.int8(mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(mphr[\"ORBIT_START\"])\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n\n for f in fields:\n metadata[f] = np.int16(mphr[f.upper()])\n\n fields = [\"sat_track_azi\"]\n for f in fields:\n data[f] = raw_data[f.upper()].flatten()[idx_nodes]\n\n fields = [(\"longitude\", long_nan), (\"latitude\", long_nan),\n (\"swath_indicator\", byte_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].flatten()\n valid = raw_unscaled[f.upper()].flatten() != nan_val\n data[f][~valid] = nan_val\n\n fields = [(\"sigma0_trip\", long_nan), (\"inc_angle_trip\", uint_nan),\n (\"azi_angle_trip\", int_nan), (\"kp\", uint_nan),\n (\"f_kp\", byte_nan), (\"f_usable\", byte_nan), (\"f_f\", uint_nan),\n (\"f_v\", uint_nan), (\"f_oa\", uint_nan), (\"f_sa\", uint_nan),\n (\"f_tel\", uint_nan), (\"f_land\", uint_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].reshape(n_records, 3)\n valid = raw_unscaled[f.upper()].reshape(n_records, 3) != nan_val\n data[f][~valid] = nan_val\n\n # modify longitudes from (0, 360) to (-180,180)\n mask = np.logical_and(data[\"longitude\"] != long_nan,\n data[\"longitude\"] > 180)\n data[\"longitude\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n mask = (data[\"azi_angle_trip\"] != int_nan) & (data[\"azi_angle_trip\"] < 0)\n data[\"azi_angle_trip\"][mask] += 360\n\n data[\"node_num\"] = np.tile((np.arange(n_node_per_line) + 1),\n n_lines).astype(np.uint8)\n data[\"line_num\"] = idx_nodes.astype(np.uint16)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n return data, metadata", "def data_input(self):\n path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'data'))\n if not os.path.isfile('{0}/{1}.csv'.format(path, self.data_file)):\n print 'Error: Dataset file is not exist.'\n exit()\n # Uplead Dataset.csv file.\n f = open('{0}/{1}.csv'.format(path, self.data_file), 'r')\n print 'Now uploading dataset File.....'\n f = list(f)\n # The Dataset contains heading, number of lines - heading\n self.number_of_VOCs = sum(1 for row in f)-1\n # Count number of columns, last column's value is empty, that is why -1.\n self.number_of_columns = len(f[0].split(',')) -1\n self.first_m_z = int(f[0].split(',')[3]) # find the first m/z value.\n self.last_m_z = int(f[0].split(',')[-2]) # find the last m/z value.\n print 'dataset includes ', self.number_of_VOCs, 'VOCs in all samples '\n print ('dataset includes ', self.number_of_columns, ' Columns, ',\n 'm/z values start from ', self.first_m_z,\n 'and end ', self.last_m_z)\n # Create a matrix with a shape of (number_of_VOCs X number_of_columns) filled with zeros.\n self.dataset = np.zeros((self.number_of_VOCs,\n self.number_of_columns))\n for line in range(1, len(f)):\n if int(float(f[line].strip().split(',')[0])) not in self.loaded_samples:\n 
self.loaded_samples.append(int(float(f[line].strip().split(',')[0])))\n for column in range(self.number_of_columns):\n self.dataset[line-1][column] = int(float(f[line].strip().split(',')[column]))", "def read_data_model(filename='data/data_model.pkl'):\n\n with open(filename, 'r') as pklfile:\n root = pkl.load(pklfile)\n\n return root", "def load_data(from_stored_data=False):\n\n if from_stored_data:\n #data_X = pickle.load(open(file_X, \"rb\"))\n data_X = pickle.load(open(\"x_sparse_small.p\", \"rb\"))\n #data_Y = pickle.load(open(file_Y, \"rb\"))\n data_Y = pickle.load(open(\"y_sparse_small.p\", \"rb\"))\n return data_X, data_Y\n\n data_X = None\n data_Y = None\n\n for num_subject in range(num_subjects):\n print \"subject :\", str(num_subject+1), \" processing started \"\n ind_data_x = None\n ind_data_y = None\n \n subject_data = sio.loadmat(\"data/data-science-P\" + str(num_subject + 1) + \".mat\")\n\n # big three headers\n meta = subject_data.get(\"meta\")\n info = subject_data.get(\"info\")[0]\n trials = subject_data.get(\"data\")\n\n # meta data\n nvoxels = meta[\"nvoxels\"][0][0][0][0]\n colToCoord = meta[\"colToCoord\"][0][0]\n coordToCol = meta[\"coordToCol\"][0][0]\n for num_trial in range(len(trials)):\n sys.stdout.write(str(num_trial)+\" \")\n sys.stdout.flush()\n # create feature vectors\n voxels = trials[num_trial][0][0]\n #feature_vec = np.zeros(dim_x * dim_y * dim_z)\n feature_vec = np.zeros((dim_x_half, dim_y, dim_z))\n for i in range(len(voxels)):\n # save only the left of the voxels to decrease the dimension of the vector \n colInfo = colToCoord[i, :]\n x = colInfo[0] - 1 # index in data starts from 1\n y = colInfo[1] - 1 # same\n z = colInfo[2] - 1 # same\n if x < dim_x_half:\n feature_vec[x][y][z] = voxels[i]\n #feature_vec[z * (dim_x * dim_y) + y * dim_x + x] = voxels[i]\n #feature_vec[z * (dim_x_half * dim_y) + y * dim_x_half + x] = voxels[i]\n feature_vec = feature_vec.flatten()\n feature_vec = sp.csr_matrix(feature_vec)\n\n # create label vectors\n trial_info = info[num_trial]\n cond_number = trial_info[1][0][0] - 2 # starts from 2 (2 ~ 13)\n word_number = trial_info[3][0][0] - 1 # starts from 1 (1 ~ 5)\n label_vec = np.zeros(num_conds * num_words_per_cond)\n label_vec[cond_number * num_words_per_cond + word_number] = 1\n \n # append data\n #data_X = sp.vstack((data_X, feature_vec)) if data_X is not None else feature_vec\n #data_Y = np.vstack((data_Y, label_vec)) if data_Y is not None else label_vec\n ind_data_x = sp.vstack((ind_data_x, feature_vec)) if ind_data_x is not None else feature_vec\n ind_data_y = np.vstack((ind_data_y, label_vec)) if ind_data_y is not None else label_vec\n\n # save ind_data files\n pickle.dump(ind_data_x, open(\"ind_\"+str(num_subject+1)+\"_x\", \"wb\"))\n pickle.dump(ind_data_y, open(\"ind_\"+str(num_subject+1)+\"_y\", \"wb\"))\n\n print \"subject :\", str(num_subject+1), \" processing done \"\n \n # save data file\n #pickle.dump(data_X, open(file_X, \"wb\"))\n #pickle.dump(data_Y, open(file_Y, \"wb\"))\n\n return data_X, data_Y", "def read():\n\n # load json and create model\n base_model = _model_builder.Network(0, model_type=\"load_model\")\n\n #load image and process\n digit = Image.open(\"./data/number.jpg\").convert(\"L\")\n digit = ImageOps.expand(digit,border=60,fill='black')\n digit = digit.resize((28, 28))\n\n #flatten the matrix (for input into MLP network todo:CNN)\n digit_flat = numpy.zeros((1, 784))\n counter = 0\n for j in range(0, 28):\n for i in range(0, 28):\n digit_flat[0][counter] = (digit.getpixel((i, j)))/255.0\n 
counter = counter+1\n\n #predict\n os.system('clear')\n base_model.predict(digit_flat)", "def read_data(self, workfile='workfile_tmp.p'):\n self.data = pickle.load(open(workfile, 'rb'))", "def read_data(self, workfile='workfile_tmp.p'):\n self.data = pickle.load(open(workfile, 'rb'))", "def Subtask4_pre_train_5():\n with open(PATH + 'pre_train_4_Subtask4.txt', encoding='utf-8') as fi:\n evi = eval(fi.read())\n\n train_data = np.load(PATH + 'pre_train_2_Subtask4.npy', allow_pickle=True).item()\n model = word2vec.KeyedVectors.load_word2vec_format(PATH + \"data/GoogleNews-vectors-negative300.bin\", binary=True)\n\n with open(PATH + 'pre_train_3_Subtask4.txt', encoding='utf-8') as f:\n document = eval(f.read())\n\n with open(PATH + 'traindata_Subtask4.txt', 'w') as fp:\n for data in train_data.items():\n claim = data[0]\n claim = re.sub(\"[-,.。:_=+*&^%$#@!?()<>/`';|]\", \"\", claim)\n claim = claim.split(' ')\n claim = list(filter(lambda x: x in model.vocab, claim))\n Vi = []\n for i in range(len(claim)):\n Vi.append(model[claim[i]])\n\n V = np.zeros(len(Vi[0]))\n for i in range(len(claim)):\n for j in range(len(Vi[0])):\n V[j] = V[j] + Vi[i][j]\n\n rms = 0\n for i in range(len(Vi[0])):\n rms += V[i] * V[i]\n rms = np.sqrt(rms / len(Vi[0]))\n\n for i in range(len(Vi[0])):\n V[i] = V[i] / rms\n V = V.astype(str).tolist()\n\n for doc in data[1]:\n lines = document[doc].split('\\n')\n for k in range(len(lines)):\n label = [data[0], doc, k]\n line = document[doc].split('\\n')[k]\n if line != str(k) + '\\t':\n line = line.replace(str(k) + '\\t', '')\n line = line.split('\\t')[0]\n line = re.sub(\"[-,.。:_=+*&^%$#@!?()<>/`';|]\", \"\", line)\n line = line.split(' ')\n line = list(filter(lambda x: x in model.vocab, line))\n if len(line) != 0:\n Vi = []\n for i in range(len(line)):\n Vi.append(model[line[i]])\n\n V1 = np.zeros(len(Vi[0]))\n for i in range(len(line)):\n for j in range(len(Vi[0])):\n V1[j] = V1[j] + Vi[i][j]\n\n rms = 0\n for i in range(len(Vi[0])):\n rms += V1[i] * V1[i]\n rms = np.sqrt(rms / len(Vi[0]))\n\n for i in range(len(Vi[0])):\n V1[i] = V1[i] / rms\n V1 = V1.astype(str).tolist()\n\n if label in evi:\n fp.write(' '.join(V) + ' ' + ' '.join(V1) + ' 1' + '\\n')\n else:\n fp.write(' '.join(V) + ' ' + ' '.join(V1) + ' 0' + '\\n')", "def read_data(path):\n with h5py.File(path, 'r') as hf:\n data = np.array(hf.get('data'))\n return data", "def read_word2vec_model():\n file_name = \"word2vec_model.txt\"\n # these are the pre-2018 lines to load a model:\n # from gensim.models.word2vec import Word2Vec\n # m = Word2Vec.load_word2vec_format(file_name, binary=False)\n \n # here are the post-2018 lines to load a model:\n from gensim.models import KeyedVectors\n print(\"Starting to load the model in \", file_name, \"...\")\n m = KeyedVectors.load_word2vec_format(file_name, binary=False)\n print(\"Model loaded.\\n\")\n\n print(\"The model built is\", m, \"\\n\")\n print(\"m.vocab has\", len(m.vocab), \"words\")\n ## The above line should print\n ## m.vocab has 43981 words\n\n print(\"Each word is a vector of size\", m.vector_size)\n ## which should tells us that each word is represented by a 300-dimensional vector\n\n print(\"\\nTry m.get_vector('hello') to see one...!\\n\")\n ## Once the model is built, it can't be changed without rebuilding it; we'll leave it. 
\n\n return m", "def get_model_data_from_files(self, oc):\r\n # Load model related files\r\n model_path = self.config['DATA_PATH'] + self.config['CUSTOMER_NAME'] + '/models/'\r\n\r\n features_file = model_path + self.task + '_' + str(oc) + '_features.txt'\r\n dummies_file = model_path + self.task + '_' + str(oc) + '_dummies.txt'\r\n model_file = model_path + self.task + '_' + str(oc) + '.joblib'\r\n\r\n if os.path.isfile(features_file) and os.path.isfile(dummies_file) and os.path.isfile(model_file):\r\n model = joblib.load(model_file)\r\n features = open(features_file, 'r', encoding=self.config['DATA_ENCODING']).read().rstrip('\\n').split(self.config['DATA_SEPARATOR'])\r\n dummies = open(dummies_file, 'r', encoding=self.config['DATA_ENCODING']).read().rstrip('\\n').split(self.config['DATA_SEPARATOR'])\r\n return (model, features, dummies)\r\n return (None, None, None)", "def read_data_test(path):\n with h5py.File(path, 'r') as hf:\n input_ = np.array(hf.get('data'))\n label_ = np.array(hf.get('label'))\n\t\n return input_, label_", "def read_nc(self, fname=None):\n fname = fname if fname else self.fname\n\n super(EigFile, self).read_nc(fname)\n\n with nc.Dataset(fname, 'r') as root:\n\n # nspin, nkpt, nband\n self.EIG = root.variables['Eigenvalues'][:,:,:] \n\n # nkpt, 3\n self.Kptns = root.variables['Kptns'][:,:]", "def _load_model(self):\n self._load_scaler('scaler.save')\n self._load_encoder('encoder0.save', 0)\n self._load_encoder('encoder1.save', 1)\n self._load_neural_network('model.json', 'model.h5')\n return", "def model_input(step=0.1):\n model = np.loadtxt('start_model.dat',dtype={'names': ('H', 'VP','VS','RHO','QP','QS',\\\n 'ETAP','ETAS','FREFP','FREFS'),'formats': ('f4', 'f4','f4','f4',\\\n 'f4','f4','f4','f4','f4','f4')}, skiprows=1)\n \n f = open('model96_input.tmp', 'w+')\n f.write('model_step96.in\\nIsotropic model\\n0\\n')\n d = np.loadtxt('cumul_depths.tmp')\n for i in np.arange(len(d)):\n for k, s in enumerate(np.arange(0,d[-1],step)):\n if s < d[i] and i==0:\n f.write('%s %s %s %s %s %s %s %s %s %s\\n'\n %(step, model['VP'][i], model['VS'][i], model['RHO'][i],\\\n model['QP'][i], model['QS'][i], model['ETAP'][i], model['ETAS'][i],\\\n model['FREFP'][i], model['FREFS'][i]))\n if i > 0:\n if s < d[i] and s > d[i-1]:\n f.write('%s %s %s %s %s %s %s %s %s %s\\n'\n %(step, model['VP'][i], model['VS'][i], model['RHO'][i],\\\n model['QP'][i], model['QS'][i], model['ETAP'][i], model['ETAS'][i],\\\n model['FREFP'][i], model['FREFS'][i]))\n f.close()\n os.system(\"mkmod96 < model96_input.tmp\")\n print (\">> Model_step96.in is ready... 
next step is comp_disp.bash\")\n print ('>> nlayers =', k)\n print (\">> Change nlayer in eigenfucntion_*.bash!!!!\")\n print (\">> mkmod96\")", "def main():\n\n # set up output directory and file\n output_file_folder = \"output/{}\".format(args.experiment_name)\n Path(output_file_folder).mkdir(parents=True, exist_ok=True)\n args.output_file_name = \"{}/{}.csv\".format(output_file_folder, args.model_name)\n args.checkpoint_name = \"{}/{}.pt\".format(output_file_folder, args.model_name + \"_best_model\")\n\n # read lcquad merged data\n if args.dataset_name == \"lcquad\":\n df_train = pd.read_csv(\"./data/lcquad/gold_db/train_gold.csv\")\n df_valid = pd.read_csv(\"./data/lcquad/gold_db/valid_gold.csv\")\n df_test = pd.read_csv(\"./data/lcquad/gold_db/lcquad_test_sorted.csv\")\n args.gold_file_name = \"lcquad/lcquad_gt_5000.csv\"\n # elif args.dataset_name == \"qald9\":\n # df_train = pd.read_csv(\"./data/qald-9/train_gold.csv\")\n # df_valid = pd.read_csv(\"./data/qald-9/valid_gold.csv\")\n # df_test = pd.read_csv(\"./data/qald-9/test_gold.csv\")\n # args.gold_file_name = \"qald/qald_data_gt.csv\"\n # elif args.dataset_name == \"webqsp\":\n # df_train = pd.read_csv(\"./data/webqsp/train_gold.csv\")\n # df_valid = pd.read_csv(\"./data/webqsp/valid_gold.csv\")\n # df_test = pd.read_csv(\"./data/webqsp/test_gold.csv\")\n # args.gold_file_name = \"webqsp/webqsp_data_gt.csv\"\n\n train_data = read_data_file(df_train, device, \"train\")\n valid_data = read_data_file(df_valid, device, \"valid\")\n test_data = read_data_file(df_test, device, \"test\")\n\n # train model and evaluate\n if args.model_name == \"pure\":\n model = PureNameLNN(args.alpha, -1, False)\n elif args.model_name == \"ctx\":\n model = None\n elif args.model_name == 'type':\n model = None\n elif args.model_name == \"pure_ctx\":\n model = None\n elif args.model_name == \"pure_type\":\n model = None\n elif args.model_name == \"ctx_type\":\n model = None\n\n model = model.to(device)\n print(\"model: \", args.model_name, args.alpha)\n\n # training\n train(model, train_data, valid_data, test_data, args.checkpoint_name, args.num_epoch, args.margin, args.learning_rate)", "def train (X, Y, modelfile='Q2/models/gaussianBinary.model', gamma=0.05, showProgress=False):\n tick = time.time()\n X = np.matrix(X)\n Y = np.matrix(Y).T\n\n m, n = X.shape\n\n # Find the Kernel Matrix KM\n KM = gaussianKM (X, X, gamma)\n\n # Parameters for CVXOPT\n YQ = Y * Y.T\n Q = np.multiply (YQ, KM)\n p = np.matrix(-np.ones((m, 1)))\n G = np.matrix(np.vstack( (-np.identity(m), np.identity(m)) ))\n h = np.matrix(np.vstack( (np.zeros((m,1)), np.ones((m,1))) ))\n A = Y.T\n b = 0\n \n # Running CVXOPT\n Q = cvx.matrix(Q)\n p = cvx.matrix(p)\n G = cvx.matrix(G)\n h = cvx.matrix(h)\n A = cvx.matrix(A, (1, m), 'd')\n b = cvx.matrix(b, (1,1), 'd')\n cvx.solvers.options['show_progress'] = showProgress\n sol = cvx.solvers.qp(P=Q, q=p, G=G, h=h, A=A, b=b)\n\n # Alphas\n alphas = np.matrix(sol['x'])\n\n # Finding the bias\n def findBias ():\n epsilon = 1e-5\n for idx, alp in enumerate(alphas):\n if (alp - 0 > epsilon and 1 - alp > epsilon):\n KM = gaussianKM (X[idx], X[idx], gamma)\n AlphaY = np.multiply (alphas, Y)\n AlphaY = np.repeat(AlphaY, 1, axis=1)\n KMalphaY = np.multiply (KM, AlphaY)\n KMalphaY = np.sum(KMalphaY, axis=0)\n b = float (Y[idx, 0] - KMalphaY)\n return b\n \n b = findBias ()\n\n # Finding the support vectors\n if (showProgress):\n epsilon = 1e-5\n sv = []\n for idx, alp in enumerate(alphas):\n if (alp - 0 > epsilon and 1 - alp > epsilon):\n 
sv.append(alp)\n with open('Q2/support-vectors/gaussian.vectors', 'w') as f:\n for v in sv:\n f.write(\"%.3f\\n\" % v)\n print (\"Number of Support Vectors: \", len(sv))\n # else:\n\n epsilon = 1e-5\n nSV = np.sum( (np.array(alphas) > epsilon) & (np.array(alphas) < 1 - epsilon), axis=0)\n print (\"Number of Support Vectors: \", nSV)\n\n # Saving the model\n model = (alphas, b)\n with open(modelfile, 'wb') as handle:\n pickle.dump(model, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n print (\"Time taken for gaussian CVXOPT training: \", time.time() - tick)", "def read_mb_file(self,idir='.',ifile=None, gmt=True, verbose=False):\n \n import numpy as np\n import os\n \n if gmt==True:\n gmt_file=idir+'/../maps/en_velo.gmt'\n if isinstance(gmt,str):\n gmt_file=gmt\n \n if gmt != False:\n self.read_lon_lat(gmt_file,verbose=verbose)\n \n if ifile is None:\n mb_file_basename= idir + '/mb_'+self.code+'_GPS.dat'\n else:\n mb_file_basename=ifile\n \n data_NEU = []\n for i in range(1,4):\n mb_file = mb_file_basename + str(i)\n\n # file\n self.ifile=os.path.abspath(mb_file)\n \n data=np.genfromtxt(mb_file,skip_header=4)\n \n # reshape to ensure a 2D array\n if len(data.shape)==1:\n data=data.reshape((1,data.shape[0]))\n \n\n\n data_NEU.append(data)\n\n if data_NEU[0].shape == data_NEU[1].shape == data_NEU[2].shape:\n self.data=np.zeros((data_NEU[0].shape[0],7))\n self.data[:,0]=data_NEU[0][:,0]\n self.data[:,1]=data_NEU[0][:,1]#*to_mm\n self.data[:,2]=data_NEU[1][:,1]#*to_mm\n self.data[:,3]=data_NEU[2][:,1]#*to_mm\n\n self.data[:,4]=data_NEU[0][:,2]#*to_mm\n self.data[:,5]=data_NEU[1][:,2]#*to_mm\n self.data[:,6]=data_NEU[2][:,2]#*to_mm\n\n else: \n print(\"!!! Error reading \",mb_file_basename,\" :*dat1, *dat2, *dat3 do not have the same length\")\n self.data = None", "def _load_dataset(self, path):\n\t\twhile True:\n\t\t\t\n\t\t\ttry:\n\t\t\t\tX_train = np.load(\"data/X_train.npy\")\n\t\t\t\tX_val = np.load(\"data/X_val.npy\")\n\t\t\t\tY_train = np.load(\"data/Y_train.npy\")\n\t\t\t\tY_val = np.load(\"data/Y_val.npy\")\n\t\t\t\tbreak\n\n\t\t\texcept FileNotFoundError:\n\n\t\t\t\tdata_temp = np.zeros((50000,64,64,3))\n\t\t\t\tlabel_temp = []\n\n\t\t\t\tfor i in range(5):\n\n\t\t\t\t\tfile = path + str(i+1)\n\t\t\t\t\twith open(file, 'rb') as fo:\n\t\t\t\t\t\ttemp_element = pickle.load(fo, encoding='bytes')\n\n\t\t\t\t\ttemp_data = temp_element[b'data']\n\t\t\t\t\tlabel_temp.extend(temp_element[b'labels'])\n\n\t\t\t\t\tfor j in range(10000):\n\t\t\t\t\t\tdata_temp[j+(i*10000)] = self._reshape(temp_data[j])\n\n\t\t\t\tlabel_temp = np.eye(10)[np.array(label_temp)]\n\n\t\t\t\tnp.random.seed(123)\n\t\t\t\tpermutations = list(np.random.permutation(50000))\n\t\t\t\tX = data_temp[permutations, :, : , :] \n\t\t\t\tY = label_temp[permutations, :]\n\t\t\t\tX_train = X[0:40000, :, :, :] \n\t\t\t\tY_train = Y[0:40000, :]\n\t\t\t\tX_val = X[40000:50000, :, :, :] \n\t\t\t\tY_val = Y[40000:50000, :]\n\n\t\t\t\tnp.save(\"./data/X_train\", X_train)\n\t\t\t\tnp.save(\"./data/X_val\", X_val)\n\t\t\t\tnp.save(\"./data/Y_train\", Y_train)\n\t\t\t\tnp.save(\"./data/Y_val\", Y_val)\n\t\t\t\tbreak\n\n\t\treturn X_train, X_val, Y_train, Y_val", "def load_model(self):\n saved_path = self.config.path_tmp / self.model.model_name\n if saved_path.exists():\n self.model.load_weights(str(saved_path / 'model.vec'))", "def load(self, model_path):\n # TODO: include new params based on ConfigEnum\n checkpoint = torch.load(model_path)\n\n self.image_size = checkpoint['image_size']\n self.device = checkpoint['device']\n self.fp16 = 
checkpoint['fp16']\n self.accumulate_grad_steps = checkpoint['accumulate_grad_steps']\n self.experiment_id = checkpoint['experiment_id']\n self.experiment_tag = checkpoint['experiment_tag']\n self.seed = checkpoint['seed']\n self.train_batch_size = checkpoint['train_batch_size']\n self.valid_batch_size = checkpoint['valid_batch_size']\n self.test_batch_size = checkpoint['test_batch_size']\n self.dataloader_num_workers = checkpoint['dataloader_num_workers']\n self.train_dataloader_shuffle = checkpoint['train_dataloader_shuffle']\n self.optimizer_type = checkpoint['optimizer_type']\n self.optimizer_params = checkpoint['optimizer_params']\n self.scheduler_type = checkpoint['scheduler_type']\n self.scheduler_params = checkpoint['scheduler_params']\n self.step_scheduler_after = checkpoint['step_scheduler_after']\n self.step_scheduler_metric = checkpoint['step_scheduler_metric']\n self.compute_train_loss_after = checkpoint['compute_train_loss_after']\n self.compute_train_metric_after = checkpoint['compute_train_metric_after']\n self.compute_valid_loss_after = checkpoint['compute_valid_loss_after']\n self.compute_valid_metric_after = checkpoint['compute_valid_metric_after']\n self.training_stopping_criteria = checkpoint['training_stopping_criteria']\n self.stopping_criteria_params = checkpoint['stopping_criteria_params']\n self.max_epoch = checkpoint['max_epoch']\n self.train_on_all_data = checkpoint['train_on_all_data']\n self.validate_after = checkpoint['validate_after']\n self.validation_steps = checkpoint['validation_steps']\n self.run_lr_range_test= checkpoint['run_lr_range_test']\n self.sleep_in_epochs = checkpoint['sleep_in_epochs']\n self.sleep_time = checkpoint['sleep_time']\n self.checkpoint_epochs = checkpoint['checkpoint_epochs']\n\n self._best_score = checkpoint['_best_score']\n self._current_score = checkpoint['_current_score']\n self._counter = checkpoint['_counter']\n self.metrics = checkpoint['metrics']\n self.current_epoch = checkpoint['current_epoch']\n self.current_train_batch = checkpoint['current_train_batch']\n self.current_valid_batch = checkpoint['current_valid_batch']\n self.num_train_samples = checkpoint['num_train_samples']\n self.num_train_iterations = checkpoint['num_train_iterations']\n self.checkpoint_snapshot = checkpoint['checkpoint_snapshot'] \n\n # initialize optimizer, scheduler, and gradient scaler\n self.configure_optimizers()\n self.configure_schedulers()\n \n if self.fp16:\n self.scaler = torch.cuda.amp.GradScaler()\n\n if self.model:\n self.model.load_state_dict(checkpoint['state_dict'])\n self.model.to(self.device)\n\n if self.optimizer:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n if self.scheduler:\n self.scheduler.load_state_dict(checkpoint['scheduler'])\n\n #if self.scaler:\n # self.scaler.load_state_dict(checkpoint['scaler'])", "def _load_training_data(self):\n self._save_training_data()", "def read_szx_fmv_13(eps_file):\n raw_data = eps_file.scaled_mdr\n raw_unscaled = eps_file.mdr\n mphr = eps_file.mphr\n\n n_node_per_line = raw_data[\"LONGITUDE\"].shape[1]\n n_lines = raw_data[\"LONGITUDE\"].shape[0]\n n_records = raw_data[\"LONGITUDE\"].size\n\n data = {}\n metadata = {}\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n ascat_time = shortcdstime2jd(raw_data[\"UTC_LINE_NODES\"].flatten()[\"day\"],\n raw_data[\"UTC_LINE_NODES\"].flatten()[\"time\"])\n data[\"jd\"] = ascat_time[idx_nodes]\n\n metadata[\"spacecraft_id\"] = np.int8(mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = 
np.uint32(mphr[\"ORBIT_START\"])\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n\n for f in fields:\n metadata[f] = np.int16(mphr[f.upper()])\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"abs_line_number\"\n ]\n\n for f in fields:\n data[f] = raw_data[f.upper()].flatten()[idx_nodes]\n\n fields = [(\"longitude\", long_nan), (\"latitude\", long_nan),\n (\"swath indicator\", byte_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].flatten()\n valid = raw_unscaled[f.upper()].flatten() != nan_val\n data[f][~valid] = nan_val\n\n fields = [(\"sigma0_trip\", long_nan), (\"inc_angle_trip\", uint_nan),\n (\"azi_angle_trip\", int_nan), (\"kp\", uint_nan),\n (\"num_val_trip\", ulong_nan), (\"f_kp\", byte_nan),\n (\"f_usable\", byte_nan), (\"land_frac\", uint_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].reshape(n_records, 3)\n valid = raw_unscaled[f.upper()].reshape(n_records, 3) != nan_val\n data[f][~valid] = nan_val\n\n # modify longitudes from (0, 360) to (-180,180)\n mask = np.logical_and(data[\"longitude\"] != long_nan,\n data[\"longitude\"] > 180)\n data[\"longitude\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n mask = (data[\"azi_angle_trip\"] != int_nan) & (data[\"azi_angle_trip\"] < 0)\n data[\"azi_angle_trip\"][mask] += 360\n\n data[\"node_num\"] = np.tile((np.arange(n_node_per_line) + 1),\n n_lines).astype(np.uint8)\n\n data[\"line_num\"] = idx_nodes.astype(np.uint16)\n\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n data[\"swath_indicator\"] = data.pop(\"swath indicator\")\n\n data[\"f_land\"] = data.pop(\"land_frac\")\n\n return data, metadata", "def execute(cf):\n\n ##Ports and parameters\n train_set = cf.get_input(\"train_set\") #training set. Typically even_file\n test_set = cf.get_input(\"test_set\") #test set. 
Typically odd_file\n WM1 = cf.get_input(\"WM1\")\n WM2 = cf.get_input(\"WM2\")\n WM3 = cf.get_input(\"WM3\")\n WM4 = cf.get_input(\"WM4\")\n WM5 = cf.get_input(\"WM5\")\n WM6 = cf.get_input(\"WM6\")\n WM7 = cf.get_input(\"WM7\")\n WM8 = cf.get_input(\"WM8\")\n WM9 = cf.get_input(\"WM9\")\n WM10 = cf.get_input(\"WM10\")\n WM11 = cf.get_input(\"WM11\")\n WM12 = cf.get_input(\"WM12\")\n WM13 = cf.get_input(\"WM13\")\n WM14 = cf.get_input(\"WM14\")\n WM15 = cf.get_input(\"WM15\")\n WM16 = cf.get_input(\"WM16\")\n WM17 = cf.get_input(\"WM17\")\n WM18 = cf.get_input(\"WM18\")\n WM19 = cf.get_input(\"WM19\")\n WM20 = cf.get_input(\"WM20\")\n WMdir = cf.get_input(\"WMdir\")\n WMdir2 = cf.get_input(\"WMdir2\")\n basefreqs = cf.get_input(\"BaseFrequencies\")\n ufemodel_path = cf.get_input(\"UFEmodel\")\n\n bestWM = cf.get_output(\"BestWM\")\n log_file = cf.get_output(\"log_file\")\n interm = cf.get_output(\"intermediate\")\n\n genome = cf.get_parameter('genome', 'string')\n motevo_path = cf.get_parameter('motevo_path', 'string')\n aligned = cf.get_parameter(\"aligned\", \"boolean\")\n\n os.mkdir(interm)\n\n\n\n # Read stuff in\n WMs = [i for i in[WM1, WM2, WM3, WM4, WM5, WM6, WM7, WM8, WM9, WM10, WM11, WM12, WM13, WM14, WM15, WM16, WM17, WM18, WM19, WM20] if i]\n\n if WMdir:\n WMs += [os.path.join(WMdir, wm) for wm in os.listdir(WMdir)]\n\n if WMdir2:\n WMs += [os.path.join(WMdir2, wm) for wm in os.listdir(WMdir2)]\n\n f = open(basefreqs)\n ATfreq = float(f.readline().strip().split()[1])\n GCfreq = float(f.readline().strip().split()[1])\n f.close()\n\n\n # Compute stuff: optimal priors and then likelihood of test set\n optpriors = []\n logliks = []\n\n for i, WM in enumerate(WMs):\n\n wmlen = len(open(WM).readlines())-4\n\n # 1. Fit prior on training set with EM\n tag = 'fitP_%i' %(i+1)\n params, sites, priors, loglikfile = giveMotevoParamFile(genome, wmlen, interm, tag, aligned, ufemodel_path, ATfreq, GCfreq, emprior=1, bgorder=0, bgprior=0.99)\n r = runMotevo(motevo_path, train_set, params, WM, interm, tag)\n if r != 0:\n print 'motevo failed ', tag\n sys.exit(1)\n\n # prior file:\n # WM_name final_prior nr_of_sites density\n # /import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/PipeLineSource/TESTRUN/NRF1_Z2/OUTPUT/NRF1_FgBg-runmotevoPG2_1/Logo 0.016554 635.008 0.251863\n # background 0.983446 37724.8 0.748137\n # UFEwm 0 0 0\n\n optprior = float(open(priors).readlines()[1].split()[1])\n bgprior=(1-optprior)\n print bgprior\n\n # 2. 
Compute log-likelihood on test set with optimal prior from training set and without EM\n tag = 'compLL_%i' %(i+1)\n params, sites, priors, loglikfile = giveMotevoParamFile(genome, wmlen, interm, tag, aligned, ufemodel_path, ATfreq, GCfreq, emprior=0, bgorder=0, bgprior=bgprior)\n runMotevo(motevo_path, train_set, params, WM, interm, tag)\n\n a = loadtxt(loglikfile, usecols=[1])\n ll = sum(a)\n\n logliks.append(ll)\n optpriors.append(optprior)\n\n print logliks\n\n\n\n #replace name in WM file with bestWM\n lines = open(WMs[argmax(logliks)]).readlines()\n lines[1] = 'NA BestWM\\n'\n bwm = open(bestWM, 'w')\n bwm.write(''.join(lines))\n\n\n l = open(log_file, 'w')\n\n l.write('WM_name\\tWM_path\\tlog_likelihood\\topt_prior\\n')\n\n names = ['WM_%i\\t%s\\t%.4f\\t%s' %(i+1, WMs[i], logliks[i], optpriors[i]) for i in arange(len(WMs))]\n\n l.write('\\n'.join(names))\n l.close()\n\n\n return 0", "def load(self):\n results = pd.read_csv(f'data/binomial_experiment_{self.margin}_C_{self.C}.csv')\n\n self.minimal_R = np.zeros((len(self.Ms), self.estimations, len(self.epsilons)))\n for i, M in enumerate(self.Ms):\n for l in range(self.estimations):\n self.minimal_R[i,l,:] = np.array(results[results['M'] == M])[l,2:]\n return", "def test_large_flmb(self):\n test_files_218 = glob.glob(INPUT_FLMB_PATH + '/unit_363-2013-218*.mdd')\n\n mdd.procall(test_files_218)\n\n data_orig = self.read_full_file('node59p1.dat')\n\n # two status files from different controllers, 12371 and 12365\n data_out_71 = self.read_full_file('node59p1_0.status_1237101.dat')\n self.check_sio_type(data_out_71, ['CS', 'PS'])\n data_out_65 = self.read_full_file('node59p1_0.status_1236501.dat')\n self.check_sio_type(data_out_65, ['CS', 'PS'])\n data_out = data_out_71\n data_out += data_out_65\n\n data_adcps = self.read_full_file('node59p1_0.adcps_1237111.dat')\n self.check_sio_type(data_adcps, ['AD'])\n data_out += data_adcps\n\n data_ctdmo = self.read_full_file('node59p1_0.ctdmo_1237100.dat')\n self.check_sio_type(data_ctdmo, ['CT', 'CO'])\n data_out += data_ctdmo\n\n data_dosta = self.read_full_file('node59p1_0.dosta_1236501.dat')\n self.check_sio_type(data_dosta, ['DO'])\n data_out += data_dosta\n\n data_flort = self.read_full_file('node59p1_0.flort_1236501.dat')\n self.check_sio_type(data_flort, ['FL'])\n data_out += data_flort\n\n data_phsen = self.read_full_file('node59p1_0.phsen_1236501.dat')\n self.check_sio_type(data_phsen, ['PH'])\n data_out += data_phsen\n\n if not TestSioUnpack.compare_sio_matches(data_orig, data_out):\n self.fail(\"Failed sio block compare\")\n\n test_files = glob.glob(INPUT_FLMB_PATH + '/unit_363-2013-205*.mdd')\n test_files_217 = glob.glob(INPUT_FLMB_PATH + '/unit_363-2013-217*.mdd')\n test_files_219 = glob.glob(INPUT_FLMB_PATH + '/unit_363-2013-219*.mdd')\n\n test_files.extend(test_files_217)\n test_files.extend(test_files_219)\n\n mdd.procall(test_files)\n\n data_out = self.compare_node59(1, data_out)\n\n test_files = glob.glob(INPUT_FLMB_PATH + '/unit_363-2013-233*.mdd')\n test_files_231 = glob.glob(INPUT_FLMB_PATH + '/unit_363-2013-231*.mdd')\n\n test_files.extend(test_files_231)\n\n mdd.procall(test_files)\n\n self.compare_node59(2, data_out)", "def load_from_disk(self, file_name = \"vehicle_classifier.pkl\"):\n self.classifier.load_from_disk(file_name)", "def _load_model(self):\n with open(self.filepath, 'rb') as file:\n self.cmodel = pickle.load(file)", "def read_file(self, filename=None):\n print(f'reading file')\n\n if filename is None:\n filename = self.model_file\n\n with open(filename, 'r') as 
f:\n # count number of lines\n npts_file = sum([1 for line in f])\n\n # go back to start and read second line in file to get number of variables\n f.seek(0)\n f.readline()\n l = f.readline()\n nvars_file = int(l.split(' ')[-1])\n\n # subtract header rows\n npts_file -= (nvars_file + 2)\n\n print(f'{nvars_file} variables found in the initial model file')\n print(f'{npts_file} points found in the initial model file')\n\n var_idx_map = {}\n\n # read in the names of the variables\n for i in range(nvars_file):\n var_name_file = f.readline().strip()\n if var_name_file.lower() == 'n':\n var_name_file = 'neut'\n elif var_name_file == 'p':\n var_name_file = 'prot'\n\n # create map of file indices to model indices\n try:\n var_idx_map[self.idx[var_name_file]] = i+1\n except KeyError:\n pass\n\n base_r = np.zeros(npts_file)\n base_state = np.zeros((npts_file, self.nvar))\n\n # read in model data\n for i, line in enumerate(f):\n variables = [float(v) for v in line.split(' ')]\n\n base_r[i] = variables[2]\n\n for j in range(self.nvar):\n if j in var_idx_map:\n base_state[i, j] = variables[var_idx_map[j]]\n\n return npts_file, base_r, base_state", "def load_e_form():\n path = os.path.join(DATA_DIR, \"eform-materialsproject-85014.csv\")\n df = pd.read_csv(path, index_col=\"mpid\")\n return df", "def main(workdir):\n dir = os.path.expanduser(workdir)\n \n #read the .dat file\n f = open('{}smi.dat'.format(dir))\n par = imp.load_source('par', '', f)\n \n #make a sdf file for visualization\n output = pybel.Outputfile(\"sdf\", dir + \"species.sdf\",overwrite=True)\n for name in par.smiles:\n smi = par.smiles[name]\n obmol = pybel.readstring(\"smi\",smi)\n output.write(obmol)\n output.close()\n \n #list with the jobs that need to be done\n jobs = []\n \n #iterate the input files\n for name in par.smiles:\n #name = input_file.replace('.inp','') #name of the calculation\n test_dir = dir + name #location where the calculations will be done\n if not os.path.exists(test_dir):\n os.mkdir(test_dir)\n \n #copy the input file to the working directory\n write_input_file(par,name,par.smiles[name],test_dir + '/input.inp')\n job = workdir + name + '/'\n jobs.append(job)\n \n run_threads(jobs, 'eric', max_running = 3)", "def unpack(self, buff, verbose=0):\n\n\n # See https://docs.python.org/3/library/struct.html#struct.pack\n # for struck pack format\n\n # Local methods to unpack numbers in little-endian format\n idx={'x':0}\n\n def read_uint8():\n idx['x']+=1\n return struct.unpack('<B', buf[idx['x']-1:idx['x']])[0]\n def read_uint32():\n idx['x']+=4\n return struct.unpack('<I', buf[idx['x']-4:idx['x']])[0]\n def read_float32():\n idx['x']+=4\n return struct.unpack('<f', buf[idx['x']-4:idx['x']])[0]\n\n # Return empty model in case the byte-array contains no information\n if len(buf) == 0:\n return None\n\n # Read global stddev and mean (not used in RQRMI version 1.1)\n _=read_float32()\n _=read_float32()\n\n num_of_stages=read_uint32()\n _log(verbose, 'Num of stages: %d' % num_of_stages)\n\n # Preallocate array\n trained_rqrmi=[None for _ in range(num_of_stages)]\n\n for s in range(num_of_stages):\n\n # Read the current stage\n num_of_models=read_uint32()\n\n _log(verbose, '\\nStage %d num of models: %d' % (s, num_of_models))\n\n # Preallocate net_list\n net_list=[None for _ in range(num_of_models)]\n\n for m in range(num_of_models):\n # Read version\n version=read_uint8()\n if version==0:\n _log(verbose, '\\nSkipping model <%d,%d>: model not compiled' % (s, m))\n continue\n elif version!=2:\n _log(verbose, 
'\\nUnsupported version for model <%d,%d>' % (s, m))\n continue\n\n _log(verbose, '\\nLoading model <%d, %d>: ' % (s,m))\n\n # Read model parameters\n mu=read_float32()\n sig=read_float32()\n fac=read_float32()\n omin=read_float32()\n num_of_layers=read_uint32()\n _log(verbose, 'layers: %d, ' % num_of_layers)\n\n # Preallocate net values\n net_values=[None for _ in range(2*num_of_layers-1)]\n\n # Read network structure\n structure=[None for _ in range(num_of_layers)]\n for l in range(num_of_layers):\n structure[l]=read_uint32()\n\n # Layer 0 bias\n net_values[0]=np.empty(structure[0])\n\n # Preallocate all other layers\n for l in range(1, num_of_layers):\n net_values[2*l-1]=np.empty(structure[l]) # Layer bias\n net_values[2*l-0]=np.empty([structure[l-1], structure[l]]) # Layer weights\n\n _log(verbose, 'structure: [%s]' % ','.join([str(x) for x in structure]))\n\n # Read values of first layer\n net_values[0][0]=read_float32()\n _=read_float32() # First layer weight is one (always)\n\n # Read values\n for l in range(1, num_of_layers):\n # Read bias\n for i in range(structure[l]):\n net_values[2*l-1][i]=read_float32()\n # Read weights\n for y in range(structure[l-1]):\n for x in range(structure[l]):\n net_values[2*l][y,x]=read_float32()\n\n # Update stage's net list\n net_list[m]=(mu, sig, fac, omin, net_values)\n\n # Update output with stage\n trained_rqrmi[s] = net_list\n\n # Read the maximum error of each last stage submodel\n self.error_list = []\n for e in range(len(self.trained_rqrmi[-1])):\n self.error_list.append(read_uint32())\n\n _log(verbose, '\\n')\n self.trained_rqrmi = trained_rqrmi", "def load_model(self, path):\n self._saver.restore(self._sess, path + '/model.ckp')\n pkl_file = open(path + '/som.pkl', 'rb')\n restored = pickle.load(pkl_file)\n pkl_file.close()\n self._m = restored['_m']\n self._n = restored['_n']\n self._neighbourhood = restored['_neighbourhood']\n # self._topography = restored['_topography']\n self._num_iterations = restored['_num_iterations']\n self._Wts = restored['_Wts']\n self._locations = restored['_locations']\n self._learned = restored['_learned']\n self._centroid_grid = restored['_centroid_grid']\n self.abnormal_dist = restored['abnormal_dist']\n\n print(\"Model restored from path: \" + path)", "def learn(filePath):\n filename = filePath.stem\n processedJAFFE = load(str(filePath))\n processedDF = pd.DataFrame(processedJAFFE)\n processedDF.columns = ['name', 'data', 'emotion']\n processedDF = processedDF.sort_values(by=['name', 'emotion'])\n grouped = processedDF.groupby(['name', 'emotion'])\n train = grouped.nth([0, 1])\n test = grouped.nth([2, 3, 4])\n\n yTrain = train.index.get_level_values(1).tolist()\n xTrain = train.values.ravel().tolist()\n yTest = test.index.get_level_values(1).tolist()\n xTest = test.values.ravel().tolist()\n\n parameters = {\n 'C': [\n 1.00E-12, 1.00E-11, 1.00E-10, 1.00E-09, 1.00E-08, 1.00E-07, 1.00E-06,\n 1.00E-05, 1.00E-04, 1.00E-03, 2.00E-03, 1.00E-02, 1.00E-01, 1.00,\n 1.00E+01, 1.00E+02, 1.00E+03, 1.00E+04, 1.00E+05\n ],\n 'gamma': [\n 1.00E00,\n 1.00E-01,\n 1.00E-02,\n 1.00E-03,\n 5.00E-04, 2.00E-04, 1.50E-04, 1.10E-04, 1.05E-04, 1.00E-04,\n 9.50E-05, 9.00E-05, 7.00E-05, 5.00E-05, 1.90E-05, 1.00E-05,\n 1.00E-06,\n 1.00E-07,\n ],\n }\n\n models = []\n models.append(['gamma \\\\ C', 1.00E-12, 1.00E-11, 1.00E-10, 1.00E-09, 1.00E-08,\n 1.00E-07, 1.00E-06, 1.00E-05, 1.00E-04, 1.00E-03, 2.00E-03, 1.00E-02,\n 1.00E-01, 1.00, 1.00E+01, 1.00E+02, 1.00E+03, 1.00E+04, 1.00E+05 ])\n gridTimeStart = time()\n numIteration = 
len(parameters['gamma']) * len(parameters['C'])\n iteration = 0\n meanTime = 0\n for gamma in parameters['gamma']:\n row = [gamma]\n for C in parameters['C']:\n print('C = %s \\t gamma = %s'%(C, gamma))\n timeStart = time()\n svc = OneVsRestClassifier(SVC(random_state=0, decision_function_shape='ovr',\n C=C, kernel='rbf', gamma=gamma), n_jobs=4)\n svc.fit(xTrain, yTrain)\n yTrue, yPred = yTest, svc.predict(xTest)\n yTrue = np.array(yTrue, dtype=np.unicode_)\n yPred = np.array(yPred, dtype=np.unicode_)\n correct = np.sum(yTrue == yPred)\n \n print(\"accuracy: %d/%d = \"%(correct, len(yTrue)),\n D('%.2f'%(correct/len(yTrue)*100)))\n row.append(D('%.2f'%(correct/len(yTrue)*100)))\n \n iterTime = time()-timeStart\n iteration = iteration + 1\n meanTime = meanTime * (iteration-1)/iteration + iterTime/iteration\n remainingTime = (numIteration-iteration)*meanTime\n print('--------------------------(%d sec)--remaining: %s'%\n (iterTime, str(timedelta(seconds=int(remainingTime)))))\n models.append(row)\n gridTime = time() - gridTimeStart\n gridTime = timedelta(seconds=int(gridTime))\n print('time: %s'%str(gridTime))\n print('saving file: %s.csv'%filename)\n with open('../csv/%s.csv'%filename, 'w') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerows(models)", "def download_data(dev_mode: str, model: word2vec.Word2Vec) -> (np.ndarray, np.ndarray):\n assert dev_mode.lower() == 'false' or dev_mode.lower() == 'true'\n \n if dev_mode.lower() == 'false':\n print('Using Actual Data...')\n data_path = os.path.join(args.data_dir, 'HIV.csv')\n df = pd.read_csv(data_path)\n df['sentence'] = df.apply(lambda x: MolSentence(mol2alt_sentence(Chem.MolFromSmiles(x['smiles']), 1)), axis=1)\n df['mol2vec'] = [DfVec(x) for x in sentences2vec(df['sentence'], model, unseen='UNK')]\n \n # convert dataframe into numpy array for training\n X = np.array([x.vec for x in df['mol2vec']])\n y = np.array(df['HIV_active'].astype(int))\n else:\n # use example data set\n data_path = os.path.join(args.data_dir, 'ames.sdf')\n df = PandasTools.LoadSDF(data_path)\n df['sentence'] = df.apply(lambda x: MolSentence(mol2alt_sentence(x['ROMol'], 1)), axis=1)\n df['mol2vec'] = [DfVec(x) for x in sentences2vec(df['sentence'], model, unseen='UNK')]\n \n # convert dataframe into numpy array for training\n X = np.array([x.vec for x in df['mol2vec']])\n y = np.array(df['class'].astype(int))\n \n return X,y", "def test_downloadModel(self):\n\t\tmodel_in = \"\"\n\t\tquery_localdirs = cancerscope.get_models.findmodel(os.path.dirname(cancerscope.__file__), \"v1_rm500\")\n\t\tif query_localdirs is not None:\n\t\t\tmodel_in = query_localdirs[\"v1_rm500\"]\n\t\telse:\n\t\t\tmodel_in = cancerscope.get_models.downloadmodel(model_label=\"v1_rm500\")\n\t\t\n\t\tself.assertTrue(os.path.isdir(model_in))\n\t\tself.assertTrue(os.path.exists(\"\".join([model_in, \"/lasagne_bestparams.npz\"])))\n\t\t\n\t\t\"\"\"Test if model can be setup correctly\"\"\"\n\t\tlmodel = cancerscope.scopemodel(model_in)\n\t\tlmodel.fit()\n\t\n\t\tself.assertEqual(len(lmodel.features), 17688)", "def retrieve_additional_files(input_qchem, data_fchk, work_dir, scratch_read_level=0):\n\n additional_data = {}\n\n natom = len(input_qchem.molecule.get_coordinates())\n file_list = os.listdir(work_dir)\n\n # OLD_DIMENSIONS\n if '819.0' in file_list:\n with open(work_dir + '819.0', 'r') as f:\n data = np.fromfile(f, dtype=np.int32)\n norb_alpha, norb_beta = data[0:2]\n norb = norb_alpha\n nbas = norb # assumption\n else:\n norb = np.shape(data_fchk['coefficients']['alpha'])[0]\n nbas = 
np.shape(data_fchk['coefficients']['alpha'])[1]\n\n\n # MO_COEFS (Already in fchk) in internal order\n if '53.0' in file_list and 'coefficients' in data_fchk:\n with open(work_dir + '53.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n mo_alpha = data[:norb*nbas].reshape(-1, norb).tolist()\n mo_beta = data[norb*nbas: 2*norb_beta*nbas].reshape(-1, norb_beta).tolist()\n # additional_data['coefficients_internal'] = {'alpha': mo_alpha, 'beta': mo_beta}\n\n # obtain the order indices between fchk order and Q-Chem internal order of basis functions\n diff_square = get_sdm(data_fchk['coefficients']['alpha'], mo_alpha)\n\n # get non-repeating indices\n indices = []\n for row in diff_square.T:\n for i in np.argsort(row):\n if i not in indices:\n indices.append(int(i))\n break\n\n # indices = np.argmin(diff_square, axis=0).tolist()\n\n # store q-chem index order for later use (e.g guess)\n data_fchk['coefficients']['qchem_order'] = indices\n else:\n indices = list(range(nbas))\n\n # FOCK_MATRIX\n if '58.0' in file_list:\n with open(work_dir + '58.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n fock_alpha = data[:nbas*nbas].reshape(-1, nbas)\n fock_beta = data[nbas*nbas: 2*nbas*nbas].reshape(-1, nbas)\n\n # set basis functions in fchk order\n fock_alpha = fock_alpha[:, indices]\n fock_alpha = fock_alpha[indices, :]\n fock_beta = fock_beta[:, indices]\n fock_beta = fock_beta[indices, :]\n\n additional_data['fock_matrix'] = {'alpha': fock_alpha.tolist(), 'beta': fock_beta.tolist()}\n\n if scratch_read_level == -1:\n # FILE_ENERGY (Not really worth to read it)\n if '99.0' in file_list:\n with open(work_dir + '99.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n\n # FILE_DENSITY_MATRIX (Already in fchk)\n if '54.0' in file_list:\n with open(work_dir + '54.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n density_alpha = data[:nbas*nbas].reshape(-1, nbas)\n density_beta = data[nbas*nbas: 2*nbas*nbas].reshape(-1, nbas)\n # set basis functions in fchk order\n density_alpha = density_alpha[:, indices]\n density_alpha = density_alpha[indices, :]\n density_beta = density_beta[:, indices]\n density_beta = density_beta[indices, :]\n additional_data['scf_density_internal'] = {'alpha': density_alpha.tolist(), 'beta': density_beta.tolist()}\n\n # HESSIAN_MATRIX\n if '132.0' in file_list:\n with open(work_dir + '132.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n hessian = data.reshape(-1, natom*3)\n additional_data['hessian'] = hessian.tolist()\n\n # AO_INTS_DEBUG\n if '21.0' in file_list:\n with open(work_dir + '21.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n ao_integrals = data.reshape(-1, nbas, nbas, nbas)\n\n # set basis functions in fchk order\n ao_integrals = ao_integrals[:, :, :, indices]\n ao_integrals = ao_integrals[:, :, indices, :]\n ao_integrals = ao_integrals[:, indices, :, :]\n ao_integrals = ao_integrals[indices, :, :, :]\n\n additional_data['ao_integrals'] = ao_integrals.tolist()\n\n if scratch_read_level > 0:\n # FILE_RAS_AMP\n if '704.0' in file_list:\n with open(work_dir + '705.0', 'r') as f:\n ras_energies = np.fromfile(f, dtype=float)\n n_ras_roots = len(ras_energies)\n\n with open(work_dir + '704.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n ras_amplitudes = data.reshape(n_ras_roots, -1)\n additional_data['ras_amplitudes'] = ras_amplitudes.tolist()\n\n return additional_data", "def _load20news_miao():\n DIR = os.path.dirname(os.path.realpath(__file__)).split('vae_sparse')[0]+'vae_sparse/optvaedatasets'\n DIR += '/20news_miao'\n h5file = DIR+'/miao.h5'\n 
if not os.path.exists(h5file):\n flen = len(open(DIR+'/vocab').readlines())\n print 'DIM: ',flen\n np.random.seed(1)\n TRAIN_VALID_MAT = readSparseFile(DIR+'/train.feat', flen, zeroIndexed=False)\n idx = np.random.permutation(TRAIN_VALID_MAT.shape[0])\n VALIDMAT = TRAIN_VALID_MAT[idx[:500]]\n TRAINMAT = TRAIN_VALID_MAT[idx[500:]]\n TESTMAT = readSparseFile(DIR+'/test.feat', flen, zeroIndexed=False) \n saveSparseHDF5(TRAINMAT,'train', h5file)\n saveSparseHDF5(VALIDMAT,'valid', h5file)\n saveSparseHDF5(TESTMAT, 'test' , h5file)\n dset = {}\n dset['vocabulary']= [k.strip().split(' ')[0] for k in open(DIR+'/vocab').readlines()]\n dset['train'] = loadSparseHDF5('train',h5file)\n dset['valid'] = loadSparseHDF5('valid',h5file)\n dset['test'] = loadSparseHDF5('test',h5file)\n dset['dim_observations'] = dset['train'].shape[1]\n dset['data_type'] = 'bow'\n return dset", "def read(f):\n \n if isinstance(f, basestring):\n # If the input is a string, treat as file name\n with open(f) as fh: # Ensure file is closed\n return read(fh) # Call again with file object\n \n # First line contains the date\n date = f.readline()\n if not date:\n raise IOError(\"Cannot read from input file \"+str(filename))\n \n # Second is description\n desc = f.readline()\n \n token = file_numbers(f)\n \n # Third contains number of mesh points\n try:\n npsi = int(token.next())\n ntheta = int(token.next())\n isym = int(token.next())\n except StopIteration:\n raise IOError(\"Unexpected end of file while reading grid size\")\n except ValueError:\n raise IOError(\"Third line should contain npsi, ntheta and isym\")\n \n # Check values\n if (isym < 0) or (isym > 1):\n raise IOError(\"isym must be either 0 or 1\")\n if (npsi < 1) or (ntheta < 1):\n raise IOError(\"Invalid npsi=\"+str(npsi)+\" or ntheta=\" + str(ntheta))\n \n # Read normalisation factors\n\n try:\n rcnt = float(token.next())\n xma = float(token.next())\n zma = float(token.next())\n btor = float(token.next())\n curtot = float(token.next())\n eaxe = float(token.next())\n dnorm = float(token.next())\n except:\n raise IOError(\"Couldn't read normalisation factors\")\n \n def read_array(n, name=\"Unknown\"):\n data = np.zeros([n])\n try:\n for i in np.arange(n):\n data[i] = float(token.next())\n except:\n raise IOError(\"Failed reading array '\"+name+\"' of size \", n)\n return data\n\n def read_2d(nx, ny, name=\"Unknown\"):\n data = np.zeros([nx, ny])\n for i in np.arange(nx):\n data[i,:] = read_array(ny, name+\"[\"+str(i)+\"]\")\n return data\n\n # Read 1D arrays\n psiflux = read_array(npsi, \"psiflux\")\n fnorm = read_array(npsi, \"fnorm\")\n ffpnorm = read_array(npsi, \"ffpnorm\")\n ponly = read_array(npsi, \"ponly\")\n pponly = read_array(npsi, \"pponly\")\n qsf = read_array(npsi, \"qsf\")\n d = read_array(npsi, \"d\")\n \n dpdz = read_array(ntheta, \"dpdz\")\n dpdr = read_array(ntheta, \"dpdr\")\n \n # 2D arrays\n \n xnorm = read_2d(ntheta, npsi, \"xnorm\")\n znorm = read_2d(ntheta, npsi, \"znorm\")\n \n # Try to read Br and Bz (may be present)\n try:\n Br = read_2d(ntheta, npsi, \"Br\")\n Bz = read_2d(ntheta, npsi, \"Bz\")\n except:\n Br = Bz = None\n \n ny = ntheta\n\n if isym == 1:\n # Fill in values for up-down symmetric case\n print(\"Grid is up-down symmetric. 
Reflecting grid about midplane\")\n ny = tsize = 2*(ntheta - 1) + 1\n \n def reflect(data, mapfunc = lambda x:x):\n \"\"\" Reflect a variable about midplane\n Optionally supply a mapping function\"\"\"\n data2 = np.zeros([tsize, npsi])\n # Copy the original data\n for i in np.arange(ntheta):\n data2[i,:] = data[i,:]\n # Now fill in the remainder\n for i in np.arange(ntheta, tsize):\n t0 = tsize - 1 - i\n data2[i,:] = mapfunc(data[t0,:])\n return data2\n \n xnorm = reflect(xnorm)\n znorm = reflect(znorm, lambda x: 2.*zma - x) # Reflect about zma\n if Br != None:\n Br = reflect(Br, lambda x:-x) # Br reverses\n if Bz != None:\n Bz = reflect(Bz) # Bz remains the same\n theta = tsize\n\n # Make sure we have Br, Bz and Bpol\n\n if (Br == None) or (Bz == None):\n # Calculate Bpol from psi then Br and Bz from Bpol\n # Use dpsi = R*Bp dx (for now)\n Bpol = np.zeros([ny, npsi])\n \n def deriv(f):\n n = np.size(f)\n dfdi = np.zeros(n)\n dfdi[1:-1] = (f[2:n] - f[0:-2])/2. # Central difference in the middle\n dfdi[0] = f[1] - f[0]\n dfdi[-1] = f[-1] - f[-2]\n return dfdi\n \n for i in np.arange(ntheta):\n drdi = deriv(xnorm[i, :])\n dzdi = deriv(znorm[i, :])\n dldi = sqrt(drdi**2 + dzdi**2) # Arc length\n dpsidi = deriv(psiflux)\n \n Bpol[i, :] = dpsidi / (dldi * xnorm[i,:])\n else:\n Bpol = np.sqrt(Br**2 + Bz**2)\n \n # Calculate toroidal field\n Btor = fnorm / xnorm\n \n #########################################\n # Create a dictionary of values to return\n # \n # Need to transpose 2D arrays to [psi, theta] \n # to be consistent with elite inputs\n \n var = {\"npsi\":npsi, \"npol\":ny, # Sizes\n \n \"psi\":psiflux,\n \"f(psi)\":fnorm,\n \"p\":ponly,\n \n \"R\": np.transpose(xnorm),\n \"Z\": np.transpose(znorm),\n\n \"Bp\":np.transpose(Bpol),\n \"Bt\":np.transpose(Btor),\n\n \"q\":qsf,\n\n \"ffprime\":ffpnorm,\n \"pprime\":pponly}\n\n if Br != None:\n var['Br'] = np.transpose(Br)\n if Bz != None:\n var['Bz'] = np.transpose(Bz)\n \n return var" ]
[ "0.590859", "0.5844918", "0.5836278", "0.57506764", "0.5689922", "0.565109", "0.5614794", "0.55639184", "0.55183434", "0.5517771", "0.5503399", "0.5463083", "0.5459767", "0.54251224", "0.5416442", "0.5392664", "0.5355077", "0.53432804", "0.5338355", "0.53167695", "0.53060263", "0.5300313", "0.52990466", "0.5294528", "0.5285144", "0.52764696", "0.52449805", "0.5238722", "0.5230759", "0.5229293", "0.5220341", "0.5207541", "0.5202847", "0.5186163", "0.51727396", "0.5168217", "0.5168121", "0.51587564", "0.5157575", "0.5156601", "0.51506037", "0.5144636", "0.5136459", "0.51357245", "0.51289415", "0.5127496", "0.51256764", "0.51190746", "0.51188916", "0.5107517", "0.51070607", "0.5102604", "0.51008147", "0.50868785", "0.50836647", "0.50803185", "0.50785387", "0.5067348", "0.5062728", "0.50616777", "0.5061545", "0.5057816", "0.5057292", "0.50533336", "0.5027981", "0.5026874", "0.50145936", "0.5010863", "0.5010863", "0.50108397", "0.5010455", "0.50071555", "0.500521", "0.500267", "0.49928284", "0.49883023", "0.4988067", "0.49809754", "0.4980833", "0.49773696", "0.4972483", "0.4969894", "0.4969284", "0.49648198", "0.495959", "0.4957055", "0.49545744", "0.49528798", "0.49513024", "0.49508724", "0.4950774", "0.4946828", "0.49464223", "0.49444637", "0.49438277", "0.49368033", "0.49333045", "0.49276766", "0.49273193", "0.4913911", "0.49130055" ]
0.0
-1
Store the camera intrinsics. We need this for the calibration matrices from the Tango
def new_camera_intrinsics_callback(self, new_camera_info): self.camera_intrinsics = new_camera_info self.k_mat = np.matrix( np.array(self.camera_intrinsics.K).reshape((3, 3)) ) self.k_inv = self.k_mat.I
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_intrinsics(self, save_dir):\n if not osp.isfile(\n osp.join(save_dir, 'intrinsics', 'intrinsics.npy')):\n np.save(osp.join(\n save_dir, 'intrinsics', 'intrinsics'), self.camera_model.K)", "def load_extrinsics(self):\n return self.um.read_json(\"extrinsics.json\")", "def load_calib(self):\n # We'll build the calibration parameters as a dictionary, then\n # convert it to a namedtuple to prevent it from being modified later\n data = {}\n\n # Load the calibration file\n calib_filepath = os.path.join(self.sequence_path, 'calib.txt')\n filedata = utils.read_calib_file(calib_filepath)\n\n # Create 3x4 projection matrices\n P_rect_00 = np.reshape(filedata['P0'], (3, 4))\n P_rect_10 = np.reshape(filedata['P1'], (3, 4))\n P_rect_20 = np.reshape(filedata['P2'], (3, 4))\n P_rect_30 = np.reshape(filedata['P3'], (3, 4))\n\n # Compute the rectified extrinsics from cam0 to camN\n T1 = np.eye(4)\n T1[0, 3] = P_rect_10[0, 3] / P_rect_10[0, 0]\n T2 = np.eye(4)\n T2[0, 3] = P_rect_20[0, 3] / P_rect_20[0, 0]\n T3 = np.eye(4)\n T3[0, 3] = P_rect_30[0, 3] / P_rect_30[0, 0]\n\n # Compute the velodyne to rectified camera coordinate transforms\n data['T_cam0_velo'] = np.reshape(filedata['Tr'], (3, 4))\n data['T_cam0_velo'] = np.vstack([data['T_cam0_velo'], [0, 0, 0, 1]])\n data['T_cam1_velo'] = T1.dot(data['T_cam0_velo'])\n data['T_cam2_velo'] = T2.dot(data['T_cam0_velo'])\n data['T_cam3_velo'] = T3.dot(data['T_cam0_velo'])\n\n # Compute the camera intrinsics\n data['K_cam0'] = P_rect_00[0:3, 0:3]\n data['K_cam1'] = P_rect_10[0:3, 0:3]\n data['K_cam2'] = P_rect_20[0:3, 0:3]\n data['K_cam3'] = P_rect_30[0:3, 0:3]\n\n # Compute the stereo baselines in meters by projecting the origin of\n # each camera frame into the velodyne frame and computing the distances\n # between them\n p_cam = np.array([0, 0, 0, 1])\n p_velo0 = np.linalg.inv(data['T_cam0_velo']).dot(p_cam)\n p_velo1 = np.linalg.inv(data['T_cam1_velo']).dot(p_cam)\n p_velo2 = np.linalg.inv(data['T_cam2_velo']).dot(p_cam)\n p_velo3 = np.linalg.inv(data['T_cam3_velo']).dot(p_cam)\n\n data['b_gray'] = np.linalg.norm(p_velo1 - p_velo0) # gray baseline\n data['b_rgb'] = np.linalg.norm(p_velo3 - p_velo2) # rgb baseline\n\n self.calib = namedtuple('CalibData', data.keys())(*data.values())", "def intrinsics(self) -> 'Intrinsics':\n return self._intrinsics", "def intrinsics_json(json_path):\n with open(json_path) as json_file:\n # Camera Intrinsic Matrix\n k_mat = np.eye(4, dtype=np.float32) #Idk why size 4? (To match translation?)\n json_data = json.load(json_file)\n k_mat[0, 0] = json_data[\"intrinsic\"][\"fx\"]\n k_mat[1, 1] = json_data[\"intrinsic\"][\"fy\"]\n k_mat[0, 2] = json_data[\"intrinsic\"][\"u0\"]\n k_mat[1, 2] = json_data[\"intrinsic\"][\"v0\"]\n\n # Transformation Mat between cameras\n stereo_t = np.eye(4, dtype=np.float32)\n stereo_t[0, 3] = json_data[\"extrinsic\"][\"baseline\"]\n\n return {\"K\":k_mat, \"inv_K\":np.linalg.pinv(k_mat), \"baseline_T\":stereo_t}", "def save(self, filename):\n file_root, file_ext = os.path.splitext(filename)\n if file_ext.lower() != INTR_EXTENSION:\n raise ValueError('Extension %s not supported for OrhtographicIntrinsics. 
Must be stored with extension %s' %(file_ext, INTR_EXTENSION))\n\n camera_intr_dict = copy.deepcopy(self.__dict__)\n f = open(filename, 'w')\n json.dump(camera_intr_dict, f)\n f.close()", "def saveCameraIntrinsics(cameraList, imageTopics, resultFile):\n cameraModelNames = {acvb.DistortedPinhole: 'pinhole',\n acvb.EquidistantPinhole: 'pinhole',\n acvb.FovPinhole: 'pinhole',\n acvb.Omni: 'omni',\n acvb.DistortedOmni: 'omni',\n acvb.ExtendedUnified: 'eucm',\n acvb.DoubleSphere: 'ds'}\n distortionModels = {acvb.DistortedPinhole: 'radtan',\n acvb.EquidistantPinhole: 'equidistant',\n acvb.FovPinhole: 'fov',\n acvb.Omni: 'none',\n acvb.DistortedOmni: 'radtan',\n acvb.ExtendedUnified: 'none',\n acvb.DoubleSphere: 'none'}\n\n chain = cr.CameraChainParameters(resultFile, createYaml=True)\n for cam_id, cam in enumerate(cameraList):\n cameraModel = cameraModelNames[cam.model]\n distortionModel = distortionModels[cam.model]\n\n # create new config file\n camParams = cr.CameraParameters(resultFile, createYaml=True)\n camParams.setRosTopic(imageTopics[cam_id])\n\n # set the data\n P = cam.geometry.projection()\n if cameraModel == 'omni':\n camParams.setIntrinsics(cameraModel, [P.xi(), P.fu(), P.fv(), P.cu(), P.cv()])\n elif cameraModel == 'pinhole':\n camParams.setIntrinsics(cameraModel, [P.fu(), P.fv(), P.cu(), P.cv()])\n elif cameraModel == 'eucm':\n camParams.setIntrinsics(cameraModel, [P.alpha(), P.beta(), P.fu(), P.fv(), P.cu(), P.cv()])\n elif cameraModel == 'ds':\n camParams.setIntrinsics(cameraModel, [P.xi(), P.alpha(), P.fu(), P.fv(), P.cu(), P.cv()])\n else:\n raise RuntimeError(\"Invalid camera model {}.\".format(cameraModel))\n camParams.setResolution([P.ru(), P.rv()])\n dist_coeffs = P.distortion().getParameters().flatten(1)\n camParams.setDistortion(distortionModel, dist_coeffs)\n\n chain.addCameraAtEnd(camParams)\n\n chain.writeYaml()", "def estimate_extrinsics(dataset):\n # extrinsics are matrices M of shape (3,4) for every datapoint --> M = [R,t] where R=rotation matrix and t = translation vector\n camera_extrinsics_univ = np.zeros(\n (dataset.datadict[\"keypoints_3d_univ\"].shape[0], 3, 4), dtype=np.float\n )\n camera_extrinsics = np.zeros(\n (dataset.datadict[\"keypoints_3d\"].shape[0], 3, 4), dtype=np.float\n )\n\n for i, vid in enumerate(\n tqdm(\n np.unique(dataset.datadict[\"v_ids\"]),\n desc=\"Estimate extrinsics per video\",\n )\n ):\n ids = dataset.datadict[\"v_ids\"] == vid\n kps3d_c = dataset.datadict[\"keypoints_3d\"][ids]\n kps3d_c_univ = dataset.datadict[\"keypoints_3d_univ\"][ids]\n kps3d_w = dataset.datadict[\"keypoints_3d_world\"][ids]\n kps3d_c = np.reshape(kps3d_c, (-1, 3))\n kps3d_c_univ = np.reshape(kps3d_c_univ, (-1, 3))\n kps3d_w = np.reshape(kps3d_w, (-1, 3))\n\n _, M, _ = cv2.estimateAffine3D(\n kps3d_w, kps3d_c, ransacThreshold=10, confidence=0.999\n )\n _, M_univ, _ = cv2.estimateAffine3D(\n kps3d_w, kps3d_c_univ, ransacThreshold=10, confidence=0.999\n )\n\n # returned values correspond to [R,t]^T\n camera_extrinsics[ids] = M\n camera_extrinsics_univ[ids] = M_univ\n\n return camera_extrinsics_univ, camera_extrinsics", "def get_intrinsics(self):\n if self._K is None:\n K = self.original_intrinsics.clone()\n if self.crop is not None:\n K[:2,2] -= torch.tensor(self.crop[:,0], device=K.device, dtype=K.dtype)\n K[:2] *= self.reup_sample / self.down_sample\n self._K = K\n return self._K", "def save_mem_load(self):\n if len(self.get_data_shape())==4 and self._img:\n data = np.zeros(self.get_data_shape())\n self._data = np.rot90(data)\n self._loaded_time_list = [0]\n 
self._data[..., 0] = np.rot90(self._img.dataobj[..., 0])\n else:\n self._loaded_time_list = [0]\n data = self._img.get_data(caching='unchanged')\n self._data = np.rot90(data)", "def getCameraMatrix(self): # real signature unknown; restored from __doc__\n pass", "def get_calibration_info():\n mjpeg_info_dict = redis_tools.get_dict(db,'mjpeg_info_dict')\n calibration_info = mct_introspection.get_homography_calibration_info()\n for camera in mjpeg_info_dict:\n if not camera in calibration_info:\n calibration_info[camera] = {'modified': ''}\n return calibration_info", "def load_camera_data(file_name):\n assert os.path.isfile(file_name), \"Invalid file {}\".format(file_name)\n import sintel_io\n\n intrinsic, extrinsic = sintel_io.cam_read(file_name)\n return intrinsic, extrinsic", "def cam_calibration():\n # read all calibration images in a folder with similar names\n images = glob.glob('./camera_cal/calibration*.jpg')\n\n # calibrate camera and read object-points (3D), image points (2D) and image shape\n objpoints, imgpoints, img_shape = calibrate_camera(images)\n print(\"DONE: Camera calibration\")\n # save calibration parameters' pickle file\n save_calib_params(objpoints, imgpoints, img_shape)\n print(\"Calibration parameters pickle file saved \")", "def rebuildMatrixCache(self):\n self.converterYUR = Mat4.convertMat(CSYupRight, self.lens.getCoordinateSystem()) * self.lens.getProjectionMat()", "def load(filename):\n file_root, file_ext = os.path.splitext(filename)\n if file_ext.lower() != INTR_EXTENSION:\n raise ValueError('Extension %s not supported for CameraIntrinsics. Must be stored with extension %s' %(file_ext, INTR_EXTENSION))\n\n f = open(filename, 'r')\n ci = json.load(f)\n f.close()\n return OrthographicIntrinsics(frame=ci['_frame'],\n vol_height=ci['_vol_height'],\n vol_width=ci['_vol_width'],\n vol_depth=ci['_vol_depth'],\n plane_height=ci['_plane_height'],\n plane_width=ci['_plane_width'],\n depth_scale=ci['_depth_scale'])", "def persistent_image_features(images, toStoreFile):\n image_features = extract_features(images)\n\n np.save(toStoreFile, image_features)", "def extract_calibration(self):\n #TODO add function to check if the folder exists because opencv points to other error rather than saying it doesnt exist\n cv_file = cv2.FileStorage(\"calib_images/calibration.yaml\", cv2.FILE_STORAGE_READ)\n camera_matrix = cv_file.getNode(\"camera_matrix\").mat()\n dist_matrix = cv_file.getNode(\"dist_coeff\").mat()\n print(\"[INFO]: Extracted camera parameters.\")\n cv_file.release()\n return camera_matrix, dist_matrix", "def copy(self):\n return CameraExtrinsic(self.position, self.direction, self.up)", "def estimate_pose(self, corners, intrinsics: CameraIntrinsics):\n raise NotImplementedError()", "def writeCameraSettings(self):\n pass", "def loadCameraCalibration(self):\n\n # Read calibration.csv\n with open(\"util/calibration.csv\", 'rb') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=\",\", quotechar=\"|\")\n tmp = []\n intrinsic_matrix = []\n distort_coef = []\n i = 0\n for row in csvreader:\n for col in row:\n try:\n tmp.append(float(col))\n except:\n print(\"ERROR in calibration.csv intrinsic matrix\")\n if(i!=3):\n intrinsic_matrix.append(tmp)\n i += 1\n tmp = []\n if(i==3):\n distort_coef = tmp\n tmp = []\n \n return intrinsic_matrix, distort_coef", "def data(self):\n image = transform.warp(self._raw_image_data, self._corrective_transform)\n image_data = transform.rotate(image, self._rotation_offset)\n return image_data", "def intrinsic_matrix_from_camera(w, h, 
fov):\n (cx, cy), f = calc_focal_values(w, h, fov)\n return np.array([[f, 0, cx], [0, f, cy], [0, 0, 1]])", "def save(self,filepath):\n d = self.X.tocoo(copy=False)\n v = self.col_view.tocoo(copy=False)\n np.savez(filepath,row=d.row,col=d.col,data=d.data,shape=d.shape,\n v_row=v.row,v_col=v.col,v_data=v.data,v_shape=v.shape)", "def camera_transform(image):\n img = np.zeros((image.shape[0], image.shape[1], 3))\n for y in range(image.shape[0]):\n for x in range(image.shape[1]):\n img[y][x] = (x - 320) / 575.5 * image[y, x], (240 - y) / 575.5 * image[y, x], image[\n y, x]\n return img", "def create_xyz_basic(self):\n if not self.__check_array__():\n return\n\n if self.resize:\n # Assign to new variable as we may want to revert back to using self.data_array with different resizing\n self.data_array_resize = cv2.resize(self.data_array, tuple(self.resize_dims), interpolation=cv2.INTER_CUBIC)\n\n # If we resize te image we need to update the dimensions\n self.num_scans = self.data_array_resize.shape[0]\n self._num_pts = self.data_array_resize.shape[1]\n print(self.num_scans, self._num_pts)\n\n # Create empty matrix to hold all fo data\n self.xyz_array = np.zeros([self.num_scans, self._num_pts, self._len_z])\n\n # assign temperature, distance and angle data to arrays\n self.xyz_array[:, :, :3] = self.data_array_resize\n\n # Iterate through each scan and assign it an arbitrary x coordinate (1st scan is 0, 2nd is 1 etc)\n for x in range(self._num_pts):\n self.xyz_array[:, x, self.x_idx] = x\n\n # Iterate through each scan angle and give an arbitrary y coordinate\n for y in range(self.num_scans):\n # Reverse indices so that we start with bottom of array\n # > np index starts top left as 0,0 but we want to set 0,0 as bottom left so that y increase up the rows\n idx = self.num_scans - (y + 1)\n self.xyz_array[idx, :, self.y_idx] = y\n\n if isinstance(self.mess_inst, MessagesGUI):\n self.mess_inst.message('XYZ array created successfully!!!')\n else:\n print('XYZ array created successfully!!!')\n\n self.flatten_array()", "def store_image(self):\n cv2.imwrite(self.__diff_filename(), self.__diff_image())", "def camera(self):\n self.spectrum = self.spectrum", "def _derive_transformation_matrices(self):\n\n if hasattr(self, '_primaries') and hasattr(self, '_whitepoint'):\n if self._primaries is not None and self._whitepoint is not None:\n npm = normalised_primary_matrix(self._primaries,\n self._whitepoint)\n\n self._derived_RGB_to_XYZ_matrix = npm\n self._derived_XYZ_to_RGB_matrix = np.linalg.inv(npm)", "def camera_callback(self, data):\n self.camera_mutex.acquire()\n self.position = [data.pose.position.x, data.pose.position.z, data.pose.position.y]\n self.rotation = [data.pose.orientation.x, data.pose.orientation.z, data.pose.orientation.y, data.pose.orientation.w]\n self.camera_mutex.release()", "def load_intrinsics(self):\n data = self.um.read_json(\"intrinsics.json\")\n K = np.asarray(data[\"K\"])\n d = np.asarray(data[\"dist\"])\n return K, d", "def pixel2cam(self, depth, intrinsics_inv):\n b, _, h, w = depth.size()\n i_range = torch.arange(0, h).view(1, h, 1).expand(1,h,w).type_as(depth) # [1, H, W]\n j_range = torch.arange(0, w).view(1, 1, w).expand(1,h,w).type_as(depth) # [1, H, W]\n ones = torch.ones(1,h,w).type_as(depth)\n pixel_coords = torch.stack((j_range, i_range, ones), dim=1) # [1, 3, H, W]\n ###pixel_coords is an array of camera pixel coordinates (x,y,1) where x,y origin is the upper left corner of the image.\n current_pixel_coords = pixel_coords[:,:,:h,:w].expand(b,3,h,w).view(b,3,-1) 
#.contiguous().view(b, 3, -1) # [B, 3, H*W]\n #cam_coords = intrinsic_inv.expand(b,3,3).bmm(current_pixel_coords).view(b,3,h,w)\n cam_coords = intrinsics_inv.bmm(current_pixel_coords).view(b,3,h,w)\n return cam_coords * depth", "def add_files(self, file_dict):\n from xeye_calib import resize_rgb_b64\n if self.src_keys is None:\n self.src_keys, self.rgb_cam_list, self.rgb_of_depth_cam_list = init_cam_set(file_dict)\n self.src_keys_dict = {v: i for i, v in enumerate(self.src_keys)}\n logger.info('Init Calibrator done.')\n logger.info('src_keys_dict, {}'.format(self.src_keys_dict))\n logger.info('file_dict.keys, {}'.format(file_dict.keys()))\n for k, v in file_dict.items():\n filename = str(10000000 + self.counter)[1:]\n if k.startswith('cam'):\n if 'dept' in k:\n continue\n print(self.src_keys_dict.keys())\n cam_id = self.src_keys_dict[k]\n dst_path = os.path.join(self.calib_data_dir, str(\n cam_id), 'cam0', filename + '.png')\n if not os.path.exists(os.path.dirname(dst_path)):\n os.makedirs(os.path.dirname(dst_path))\n # print('calib data copy', v, dst_path)\n # print('calib data copy', v, dst_path, file=sys.stderr)\n # with open(self.record_path, 'a') as fout:\n # fout.write('cp ' + v + ' ' + dst_path + '\\n')\n with open(dst_path, 'wb') as fout:\n fout.write(base64.b64decode(v))\n elif k.startswith('rgb'):\n cam_id = self.src_keys_dict[k]\n dst_path = os.path.join(self.calib_data_dir, str(\n cam_id), 'cam0', filename + '.jpg')\n if not os.path.exists(os.path.dirname(dst_path)):\n os.makedirs(os.path.dirname(dst_path))\n if self.resize_xeye:\n resize_rgb_b64(v, dst_path)\n else:\n with open(dst_path, 'wb') as fout:\n fout.write(base64.b64decode(v))\n\n else:\n logger.warn('Unrocognize key: {}'.format(k))\n return\n self.counter += 1", "def setCameraMatrix(self, val): # real signature unknown; restored from __doc__\n pass", "def modelview_matrix(self):\n camera = self.figure.scene.camera\n return camera.view_transform_matrix.to_array().astype(np.float32)", "def update(self):\n if self.var_info.bits_per_pixel == 1:\n b = self._img.tobytes(\"raw\", \"1;R\")\n self.mmap[:len(b)] = b\n\n elif self.var_info.bits_per_pixel == 16:\n self.mmap[:] = self._img_to_rgb565_bytes()\n\n elif self.var_info.bits_per_pixel == 32:\n self.mmap[:] = self._img.convert(\"RGB\").tobytes(\"raw\", \"XRGB\")\n\n else:\n raise Exception(\"Not supported - platform %s with bits_per_pixel %s\" %\n (self.platform, self.var_info.bits_per_pixel))", "def _cache_checker_matrices(self):\r\n if self.model.mat_texid is not None:\r\n self._geom_checker_mats = []\r\n for geom_id in range(self.model.ngeom):\r\n mat_id = self.model.geom_matid[geom_id]\r\n tex_id = self.model.mat_texid[mat_id]\r\n texture = self.textures[tex_id]\r\n h, w = texture.bitmap.shape[:2]\r\n self._geom_checker_mats.append(self._make_checker_matrices(h, w))\r\n\r\n # add skybox\r\n skybox_tex_id = -1\r\n for tex_id in range(self.model.ntex):\r\n skybox_textype = 2\r\n if self.model.tex_type[tex_id] == skybox_textype:\r\n skybox_tex_id = tex_id\r\n if skybox_tex_id >= 0:\r\n texture = self.textures[skybox_tex_id]\r\n h, w = texture.bitmap.shape[:2]\r\n self._skybox_checker_mat = self._make_checker_matrices(h, w)\r\n else:\r\n self._skybox_checker_mat = None", "def get_transforms(args):\n gps_reader = GPSReader(args['gps'])\n gps_data = gps_reader.getNumericData()\n imu_transforms = IMUTransforms(gps_data)\n return imu_transforms", "def get_transform_matrix(theta, phi = None, invert_rot = False, invert_focal = False):\n\n if phi is None:\n phi = const.PHI_IDX 
* 10.0\n\n #extrinsic x intrinsic\n camera_matrix = np.zeros((4, 4), dtype=np.float32)\n\n intrinsic_matrix = np.eye(4, dtype=np.float32)\n extrinsic_matrix = np.eye(4, dtype=np.float32)\n\n sin_phi = np.sin(float(phi) / 180.0 * np.pi)\n cos_phi = np.cos(float(phi) / 180.0 * np.pi)\n sin_theta = np.sin(float(-theta) / 180.0 * np.pi)\n cos_theta = np.cos(float(-theta) / 180.0 * np.pi)\n\n #theta rotation\n rotation_azimuth = np.zeros((3, 3), dtype=np.float32)\n rotation_azimuth[0, 0] = cos_theta\n rotation_azimuth[2, 2] = cos_theta\n rotation_azimuth[0, 2] = -sin_theta\n rotation_azimuth[2, 0] = sin_theta\n rotation_azimuth[1, 1] = 1.0\n\n #phi rotation\n rotation_elevation = np.zeros((3, 3), dtype=np.float32)\n rotation_elevation[0, 0] = cos_phi\n rotation_elevation[0, 1] = sin_phi\n rotation_elevation[1, 0] = -sin_phi\n rotation_elevation[1, 1] = cos_phi\n rotation_elevation[2, 2] = 1.0\n\n #rotate phi, then theta\n rotation_matrix = np.matmul(rotation_azimuth, rotation_elevation)\n if invert_rot:\n rotation_matrix = np.linalg.inv(rotation_matrix)\n\n displacement = np.zeros((3, 1), dtype=np.float32)\n displacement[0, 0] = const.DIST_TO_CAM\n displacement = np.matmul(rotation_matrix, displacement)\n\n #assembling 4x4 from R + T\n extrinsic_matrix[0:3, 0:3] = rotation_matrix\n extrinsic_matrix[0:3, 3:4] = -displacement\n\n if invert_focal:\n intrinsic_matrix[2, 2] = float(const.focal_length)\n intrinsic_matrix[1, 1] = float(const.focal_length)\n else:\n intrinsic_matrix[2, 2] = 1.0 / float(const.focal_length)\n intrinsic_matrix[1, 1] = 1.0 / float(const.focal_length)\n\n camera_matrix = np.matmul(extrinsic_matrix, intrinsic_matrix)\n return camera_matrix", "def preprocess(self):\n\n mm_magcoord.add_aacgm_coordinates(self)\n mm_magcoord.add_quasi_dipole_coordinates(self)\n mm_sc.calculate_ecef_velocity(self)\n mm_sc.add_ram_pointing_sc_attitude_vectors(self)\n\n return", "def imaging(input_model, reference_files):\n detector = cf.Frame2D(name='detector', axes_order=(0, 1), unit=(u.pix, u.pix))\n v2v3 = cf.Frame2D(name='v2v3', axes_order=(0, 1), unit=(u.deg, u.deg))\n world = cf.CelestialFrame(reference_frame=coord.ICRS(), name='world')\n\n subarray2full = subarray_transform(input_model)\n imdistortion = imaging_distortion(input_model, reference_files)\n distortion = subarray2full | imdistortion\n distortion.bounding_box = imdistortion.bounding_box\n del imdistortion.bounding_box\n tel2sky = pointing.v23tosky(input_model)\n pipeline = [(detector, distortion),\n (v2v3, tel2sky),\n (world, None)]\n return pipeline", "def m(self) -> np.ndarray:\n assert self._k is not None and self._r is not None and self._t is not None, \"camera must be calibrated\"\n return forge_projective_matrix(self._k, r=self._r, t=self._t)", "def __set_perspective(self):\n\n src = np.float32([[(.42 * self.img_shape[1],.65 * self.img_shape[0] ),\n (.58 * self.img_shape[1], .65 * self.img_shape[0]),\n (0 * self.img_shape[1],self.img_shape[0]),\n (1 * self.img_shape[1], self.img_shape[0])]])\n\n dst = np.float32([[0,0],\n [self.img_shape[1],0],\n [0,self.img_shape[0]],\n [self.img_shape[1],self.img_shape[0]]])\n\n self.M = cv2.getPerspectiveTransform(src, dst)\n self.M_inv = cv2.getPerspectiveTransform(dst, src)", "def save(self, filename):\n np.savez(temp_dir + '/' + filename + '.npz', chip_ids=self.chip_ids, core_ids=self.core_ids, cx_ids=self.cx_ids)", "def _read_calibration_params(self) -> np.ndarray:\n print('Loading calibration parameters...')\n cameras_data = []\n\n for c in range(self.num_cameras):\n camera = 
'camera' + str(c).zfill(2) + '.json'\n print(' ', camera+'...')\n with open(os.path.join(self.cameras_dir, camera)) as f:\n data = json.load(f)\n\n # # Store data for each frame in numpy array\n # camera_params = np.empty(0)\n # for d in data:\n # frames = d['end_frame'] - d['start_frame']\n # del d['start_frame']\n # del d['end_frame']\n # cam = np.full(frames, d)\n # camera_params = np.append(camera_params, cam, axis=0)\n #\n cameras_data.append(data)\n return np.array(cameras_data, dtype=object)", "def add_snapshot(self):\n\n\t\tself.mu_values = self.cvt_handler.mu_values\n\t\tdim_mu = self.mu_values.shape[1]\n\t\taux_snapshot = self.file_handler.parse(self.namefile_prefix + str(dim_mu-1) + self.file_format, self.output_name)\n\t\tsnapshot = aux_snapshot.reshape(aux_snapshot.shape[0],1)\n\t\tself.snapshots = np.append(self.snapshots, snapshot, 1)\n\t\t\n\t\tself.print_info()", "def create_matrices(self):\n debut = time.process_time()\n\n x = np.arange(0, self.axe_X)\n y = np.arange(0, self.axe_Y)\n xv, yv = np.meshgrid(x, y)\n\n print(\"\\nmatrix creation estimation time: %.1fs\" % (1e-6*self.axe_X*self.axe_Y))\n\n img_matrix_x, img_matrix_y = self.find_position(xv, yv)\n\n fin = time.process_time()\n print(\"matrix created in time: %.1f s\" % (fin-debut))\n return img_matrix_x, img_matrix_y", "def CameraToImage(self, cameraPoints):\n # setting up the required matrices\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n if np.isscalar(a0):\n\n R = np.array([[a1, a2], [b1, b2]])\n T = np.array([[a0], [b0]])\n\n else:\n R = np.array([[a1[0], a2[0]], [b1[0], b2[0]]])\n T = np.array([[a0[0]], [b0[0]]])\n\n cameraPoints = cameraPoints.T\n # computing the transformation to the image system\n return (T + np.dot(R, cameraPoints)).T", "def init_storage(self):\n\n # Create the average rotation matrix (first order).\n self.first_frame_order = zeros((INC+1, 3, 3), float64)\n\n # Create the frame order matrix (each element is ensemble averaged and corresponds to a different time step).\n self.second_frame_order = zeros((INC+1, 9, 9), float64)\n\n # Init the rotation matrix.\n self.rot = zeros((3, 3), float64)\n\n # Some data arrays.\n self.full = zeros(INC+1)\n self.count = zeros(INC+1)", "def world_to_camera(self, X):\n raise NotImplementedError", "def camera_to_world(self, X):\n raise NotImplementedError", "def write_features(self):\r\n def pack_keypoint(keypoints, descriptors):\r\n kpts = np.array([[kp.pt[0], kp.pt[1], kp.size,\r\n kp.angle, kp.response, kp.octave,\r\n kp.class_id]\r\n for kp in keypoints])\r\n desc = np.array(descriptors)\r\n return kpts, desc\r\n\r\n filename = self.features_path + self.id\r\n kpts, desc = pack_keypoint(self.keypoints, self.descriptors)\r\n logging.info(f'Writing features of image {self.name} to file...')\r\n np.savez(filename, keypoints=kpts, descriptors=desc)\r\n logging.info('Features saved.')", "def calibrateCamera(config,cbrow = 4,cbcol = 3,calibrate=False,alpha=0.4,manualPoints=False):\n\n # Termination criteria\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n # Prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\n objp = np.zeros((cbrow * cbcol, 3), np.float32)\n objp[:, :2] = np.mgrid[0:cbcol, 0:cbrow].T.reshape(-1, 2)\n \n # Read the config file\n cfg_3d = auxiliaryfunctions.read_config(config)\n 
img_path,path_corners,path_camera_matrix,path_undistort=caf.Foldernames3Dproject(cfg_3d)\n \n # Make sure that the folders are present (if not, make them)\n if not os.path.exists(img_path):\n os.makedirs(img_path)\n if not os.path.exists(path_corners):\n os.makedirs(path_corners)\n \n # Get images and camera names\n images = glob.glob(os.path.join(img_path,'*.jpg'))\n cam_names = cfg_3d['camera_names']\n \n ## It's not clear to me why I want to do this or what this number represents... I need to read further into it\n # # update the variable snapshot* in config file according to the name of the cameras\n # try:\n # for i in range(len(cam_names)):\n # cfg_3d[str('config_file_'+cam_names[i])] = cfg_3d.pop(str('config_file_camera-'+str(i+1)))\n # for i in range(len(cam_names)):\n # cfg_3d[str('shuffle_'+cam_names[i])] = cfg_3d.pop(str('shuffle_camera-'+str(i+1)))\n # except:\n # pass\n \n project_path = cfg_3d['project_path']\n projconfigfile=os.path.join(str(project_path),'config.yaml')\n auxiliaryfunctions.write_config_3d(projconfigfile,cfg_3d)\n\n # Initialize the dictionary \n img_shape = {}\n objpoints = {} # 3d point in real world space\n imgpoints = {} # 2d points in image plane.\n dist_pickle = {} ## I think this is the intrinsic parameter file that needs to be read in\n stereo_params= {}\n for cam in cam_names:\n objpoints.setdefault(cam, [])\n imgpoints.setdefault(cam, [])\n dist_pickle.setdefault(cam, [])\n\n # Sort the images.\n images.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))\n if len(images)==0:\n raise Exception(\"No calibration images found. Make sure the calibration images are saved as .jpg and with prefix as the camera name as specified in the config.yaml file.\")\n direct_images = [img for img in images if 'direct' in img]\n mirror_images = [img for img in images if 'mirror' in img]\n\n if manualPoints:\n # This is where we read in the manually identified points and check them and stuff\n csvFiles = glob.glob(os.path.join(img_path,'*.csv'))\n for fname_csv in csvFiles:\n allPoints = caf.readCSV(fname_csv)\n for row in allPoints:\n continue\n # Start with mirror to figure out which BGR to use for direct\n for fname in mirror_images:\n \n filename=Path(fname).stem\n img = cv2.imread(fname)\n\n # Create a dictionary with all of the different image color conversions for testing\n img_colorConv = {\n \"BGR\":img,\n \"HSV\":cv2.cvtColor(img,40),\n \"Gray\":cv2.cvtColor(img,6)\n }\n\n thresh = 120\n ret = False\n for colorConv in img_colorConv:\n currImg = img_colorConv[colorConv]\n size = currImg.shape\n \n if len(size) == 2:\n \n ret, corners = cv2.findChessboardCorners(currImg, (cbcol,cbrow),None,)\n if ret == True: break\n\n currImg_bw = cv2.threshold(currImg,thresh,255,cv2.THRESH_BINARY)[1]\n ret, corners = cv2.findChessboardCorners(currImg_bw, (cbcol,cbrow),None,)\n if ret == True: break\n else: continue\n \n chanIdx = 0\n while (ret == False) and (chanIdx < size[2]):\n ret, corners = cv2.findChessboardCorners(currImg[:,:,chanIdx], (cbcol,cbrow),None,)\n if ret == True: break\n channel_bw = cv2.threshold(currImg[:,:,chanIdx],thresh,255,cv2.THRESH_BINARY)[1]\n ret, corners = cv2.findChessboardCorners(channel_bw, (cbcol,cbrow),None,)\n chanIdx += 1\n \n # If found, add object points, image points (after refining them)\n if ret == True:\n currImg = img_colorConv[\"Gray\"]\n img_shape[cam] = currImg.shape[::-1]\n objpoints[cam].append(objp)\n corners = cv2.cornerSubPix(currImg,corners,(11,11),(-1,-1),criteria)\n imgpoints[cam].append(corners)\n # Draw the corners 
and store the images\n img = cv2.drawChessboardCorners(currImg, (cbcol,cbrow), corners,ret)\n cv2.imwrite(os.path.join(str(path_corners),filename+'_corner.jpg'),img)\n else:\n print(\"Corners not found for the image %s\" %Path(fname).name)\n \n try:\n h, w = img.shape[:2]\n except:\n raise Exception(\"It seems that the name of calibration images does not match with the camera names in the config file. Please make sure that the calibration images are named with camera names as specified in the config.yaml file.\")\n\n # Perform calibration for each cameras and store the matrices as a pickle file\n if calibrate == True:\n \n # Read in the intrinsic parameters for each camera\n for cam in cam_names:\n dist_pickle[cam] = pickle.load(os.path.join(path_camera_matrix,cam+'_intrinsic_params.pickle'))\n\n # # Compute stereo calibration for each pair of cameras\n # camera_pair = [[cam_names[0], cam_names[1]]]\n # for pair in camera_pair:\n # print(\"Computing stereo calibration for \" %pair)\n # retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(objpoints[pair[0]],imgpoints[pair[0]],imgpoints[pair[1]],dist_pickle[pair[0]]['mtx'],dist_pickle[pair[0]]['dist'], dist_pickle[pair[1]]['mtx'], dist_pickle[pair[1]]['dist'],(h, w),flags = cv2.CALIB_FIX_INTRINSIC)\n\n # # Stereo Rectification\n # rectify_scale = alpha # Free scaling parameter check this https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#fisheye-stereorectify\n # R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, (h, w), R, T, alpha = rectify_scale)\n \n # stereo_params[pair[0]+'-'+pair[1]] = {\"cameraMatrix1\": cameraMatrix1,\"cameraMatrix2\": cameraMatrix2,\"distCoeffs1\": distCoeffs1,\"distCoeffs2\": distCoeffs2,\"R\":R,\"T\":T,\"E\":E,\"F\":F,\n # \"R1\":R1,\n # \"R2\":R2,\n # \"P1\":P1,\n # \"P2\":P2,\n # \"roi1\":roi1,\n # \"roi2\":roi2,\n # \"Q\":Q,\n # \"image_shape\":[img_shape[pair[0]],img_shape[pair[1]]]}\n \n # print('Saving the stereo parameters for every pair of cameras as a pickle file in %s'%str(os.path.join(path_camera_matrix)))\n \n # auxiliaryfunctions.write_pickle(os.path.join(path_camera_matrix,'stereo_params.pickle'),stereo_params)\n # print(\"Camera calibration done! Use the function ``check_undistortion`` to check the check the calibration\")\n # else:\n # print(\"Corners extracted! You may check for the extracted corners in the directory %s and remove the pair of images where the corners are incorrectly detected. 
If all the corners are detected correctly with right order, then re-run the same function and use the flag ``calibrate=True``, to calbrate the camera.\"%str(path_corners))", "def save_nifti(self, path):\n meta = {'te': self.te, 'tr': self.tr, 'sw': self.sw}\n if self.sequence_type == 'STEAM':\n meta['tm'] = self.tm\n\n # store real and imaginary components in last 2 dims\n component_fid = np.stack((np.real(self.fid),np.imag(self.fid)), -2)\n nifti = nib.Nifti2Image(component_fid, self.transform.get_matrix(), extra=meta)\n nib.save(nifti, path)", "def transform(self,image,masks,aug):\n # convert to PIL Image.\n PIL_convert = transforms.ToPILImage()\n image = PIL_convert(image)\n masks = PIL_convert(masks.astype(np.int32))\n # resize the image and masks\n resize = transforms.Resize(size=(512,512))\n image = resize(image)\n masks = resize(masks)\n # augmentation\n if aug is True:\n augment(image,masks)\n else:\n pass\n # Convert to Tensor\n image = TF.to_tensor(image)\n masks = TF.to_tensor(masks)\n\n return image,masks", "def set_attributes(self):\n\n pil_image = PILImage.open(self.path)\n\n # Get the exif data\n # Thanks https://gist.github.com/erans/983821\n exif_data = {}\n info = pil_image._getexif()\n if info:\n for tag, value in info.items():\n decoded = PILExifTags.TAGS.get(tag, tag)\n if decoded == \"GPSInfo\":\n gps_data = {}\n for t in value:\n sub_decoded = PILExifTags.GPSTAGS.get(t, t)\n gps_data[sub_decoded] = value[t]\n\n exif_data[decoded] = gps_data\n else:\n exif_data[decoded] = value\n\n gps_latitude = exif_data.get(\"GPSInfo\",{}).get(\"GPSLatitude\")\n gps_latitude_ref = exif_data.get(\"GPSInfo\",{}).get('GPSLatitudeRef')\n gps_longitude = exif_data.get(\"GPSInfo\",{}).get('GPSLongitude')\n gps_longitude_ref = exif_data.get(\"GPSInfo\",{}).get('GPSLongitudeRef')\n gps_altitude = exif_data.get(\"GPSInfo\",{}).get('GPSAltitude')\n gps_altitude_ref = exif_data.get(\"GPSInfo\",{}).get('GPSAltitudeRef')\n gps_direction = exif_data.get(\"GPSInfo\",{}).get('GPSImgDirection')\n gps_direction_ref = exif_data.get(\"GPSInfo\",{}).get('GPSImgDirectionRef')\n\n if gps_latitude and gps_latitude_ref and gps_longitude and gps_longitude_ref:\n lat = gps_tag_to_decimal_degress(gps_latitude)\n if gps_latitude_ref != \"N\": \n lat = 0 - lat\n\n lon = gps_tag_to_decimal_degress(gps_longitude)\n if gps_longitude_ref != \"E\":\n lon = 0 - lon\n\n # image attributes\n self.width, self.height = pil_image.size\n # exif attributes\n self.lat, self.lon = lat, lon\n self.focal = float(exif_data[\"FocalLengthIn35mmFilm\"])\n self.timestamp = datetime.datetime.strptime(exif_data[\"DateTimeOriginal\"], \"%Y:%m:%d %H:%M:%S\").timestamp()\n self.altitude = gps_altitude[0] / gps_altitude[1]\n self.direction = float(gps_direction) if gps_direction is not None else None\n self.pixel_size = (self.altitude * 35.0 / self.focal) / float(self.width)\n # transform attributes\n self.point = self.drone_map.reproject(lon,lat)\n self.angle = float(gps_direction) if gps_direction is not None else 0\n self.scale = 1.0", "def __getitem__(self, index):\n inputs = {}\n\n do_color_aug = self.is_train and random.random() > 0.5\n do_flip = self.is_train and random.random() > 0.5\n\n for i in self.frame_idxs:\n if i=='s':\n filename = os.path.join('cam1', 'data', self.filenames[index])\n else:\n filename = os.path.join('cam0', 'data', self.filenames[index+i])\n\n inputs[(\"color\", i, -1)] = self.get_color(filename, do_flip)\n\n # adjusting intrinsics to match each scale in the pyramid\n K = self.K.copy()\n K[0, :] *= 
self.width\n K[1, :] *= self.height\n inv_K = np.linalg.pinv(K)\n\n inputs[(\"K\")] = torch.from_numpy(K)\n inputs[(\"inv_K\")] = torch.from_numpy(inv_K)\n\n if do_color_aug:\n color_aug = transforms.ColorJitter.get_params(self.brightness, self.contrast, self.saturation, self.hue)\n else:\n color_aug = (lambda x: x)\n\n self.preprocess(inputs, color_aug)\n\n for i in self.frame_idxs:\n del inputs[(\"color\", i, -1)]\n\n if \"s\" in self.frame_idxs:\n stereo_T = np.eye(4, dtype=np.float32)\n baseline_sign = -1 if do_flip else 1\n side_sign = -1\n stereo_T[0, 3] = side_sign * baseline_sign * 0.1\n inputs[\"stereo_T\"] = torch.from_numpy(stereo_T)\n\n return inputs", "def camera_to_pixel(self, X):\n raise NotImplementedError", "def prepare_data(camera, image_nums):\n from gen_d_params import gen_d_params\n\n # Fetch the data of interest set by \"camera\" and \"image_nums\"\n imgs = [camera + x for x in image_nums]\n d_params = gen_d_params(imgs)\n \n # Add azimuth feature for pixel location\n d_params[\"err_ang\"] = np.rad2deg(np.arctan2(d_params[\"ydiff\"], d_params[\"xdiff\"]))\n d_params[\"r_img\"] = np.sqrt(np.power(d_params[\"x_img\"], 2) + np.power(d_params[\"y_img\"], 2)) \n d_params[\"err_mag\"] = d_params.pop(\"mag\")\n d_params[\"azm_img\"] = np.rad2deg(np.arctan2(d_params[\"y_img\"], d_params[\"x_img\"]))\n \n x1 = np.reshape(d_params[\"r_img\"], (len(d_params[\"r_img\"]), 1))\n x2 = np.reshape(d_params[\"azm_img\"], (len(d_params[\"azm_img\"]), 1))\n x = np.hstack((x1, x2))\n y1 = np.reshape(d_params[\"err_mag\"], (len(d_params[\"err_mag\"]), 1))\n y2 = np.reshape(d_params[\"err_ang\"], (len(d_params[\"err_ang\"]), 1))\n y = np.hstack((y1, y2))\n\n return x, y", "def __getitem__(self, index):\n\n totensor = transforms.Compose(\n [transforms.Resize((224, 224)),\n transforms.ToTensor()\n ])\n\n assert (index < len(self.data))\n assert (index < self.len)\n images = self.data[index]\n # print(images)\n img = cv2.imread(os.path.join(self.dataset.directory, images))\n\n target = self.bbox[index]\n\n scale = np.array(img.shape) / 224\n\n # img = cv2.rectangle(img, (target[0]-10, target[1]-10), (target[2]+10, target[3]+10),\n # color=(255, 255, 0), thickness=10)\n\n # cv2.imwrite(os.path.join(\"res\", str(index)+\".jpg\"), draw)\n\n # print(img.shape, scale)\n img = cv2.resize(img, (224, 224))\n\n # print(target)\n\n target[0] = int(target[0] / scale[1] - 5)\n target[1] = int(target[1] / scale[0] - 5)\n target[2] = int(target[2] / scale[1] + 5)\n target[3] = int(target[3] / scale[0] + 5)\n\n # print(target)\n t = target\n if self.transform is not None:\n seq_det = self.transform.to_deterministic() # call this for each batch again, NOT only once at the start\n\n keypoints_on_images = []\n keypoints = []\n keypoints.append(ia.Keypoint(x=target[0], y=target[1]))\n keypoints.append(ia.Keypoint(x=target[2], y=target[3]))\n\n keypoints_on_images.append(ia.KeypointsOnImage(keypoints, shape=np.asarray(img).shape[:-1]))\n\n # augment keypoints and images\n img = seq_det.augment_image(np.asarray(img))\n after_aug = []\n\n target = seq_det.augment_keypoints(keypoints_on_images)\n for point in target[0].keypoints:\n # print(point)\n x_new, y_new = point.x, point.y\n after_aug.append(point.x)\n after_aug.append(point.y)\n target = after_aug\n # print(after_aug)\n newImg = Image.fromarray(img)\n reg_targets = np.float32(np.array(target))\n\n b=self.labels[index]\n\n #a = np.array(self.labels[index])\n #b = np.zeros((a.size, 2))\n #b[np.arange(a.size), a] = 1\n\n #print(\"B=\",b,self.labels[index])\n\n 
#print(targets)\n ##draw = cv2.rectangle(cv2.resize(np.array(newImg), (224, 224)), (t[1], t[0]), (t[3], t[2]), color=(0, 0, 0),\n # thickness=6)\n\n #draw = cv2.rectangle(cv2.resize(np.array(draw), (224, 224)), (targets[0], targets[1]), (targets[2], targets[3]),\n # color=(0, 255, 0), thickness=3)\n\n #cv2.imwrite(os.path.join(\"res\", str(index) + \".jpg\"), draw)\n #print(reg_targets)\n\n return totensor(newImg), reg_targets,b ,index", "def save_calibration(self):\n\n path = './{}/'.format(self.controller)\n # Check if path exist, if not, creates it.\n if not os.path.exists(os.path.dirname(path)):\n try:\n os.makedirs(os.path.dirname(path))\n except OSError as exc:\n # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n\n # Save Jacobian matrix (self.mat)\n with open(\"./{i}/mat.txt\".format(i=self.controller), 'wt') as f:\n for i in range(3):\n f.write('{a},{b},{c}\\n'.format(a=self.mat[i, 0], b=self.mat[i, 1], c=self.mat[i, 2]))\n\n # Save rotational matrix\n with open(\"./{i}/rotmat.txt\".format(i=self.controller), 'wt') as f:\n for i in range(3):\n f.write('{a},{b},{c}\\n'.format(a=self.rot[i, 0], b=self.rot[i, 1], c=self.rot[i, 2]))\n\n # Save other data\n with open('./{i}/data.txt'.format(i=self.controller), 'wt') as f:\n f.write('{d}\\n'.format(d=self.um_px))\n f.write('{d}\\n'.format(d=self.x_init))\n f.write('{d}\\n'.format(d=self.y_init))\n f.write('{d}\\n'.format(d=self.template_loc[0]))\n f.write('{d}\\n'.format(d=self.template_loc[1]))", "def get_image(self):\n if self._image is None:\n image_data = np.load(self.image_file)\n if not isinstance(image_data, np.ndarray):\n image_data = image_data['arr_0']\n self.meta_data = ImageWrapper.load_metadata(self.image_file+\".meta\")\n exposure_time = self.meta_data['exposure_time_us'] * 1e-6\n dark_level = float(self.meta_data['black_level'])\n # saturation_mask = image_data.max(axis=2) >= 4094\n image_data = np.clip((image_data.astype(np.float32) - dark_level),\n a_min=0.0, a_max=None) / exposure_time\n if self.original_vignetting is not None:\n image_data = image_data / self.original_vignetting\n if self.crop is not None:\n image_data = image_data[\n self.crop[1,0]:self.crop[1,1],\n self.crop[0,0]:self.crop[0,1]\n ]\n # saturation_mask = saturation_mask[\n # self.crop[1,0]:self.crop[1,1],\n # self.crop[0,0]:self.crop[0,1]\n # ]\n if self.down_sample is not None:\n image_data = cv2.resize(\n image_data,\n dsize=None,\n fx=1./self.down_sample,\n fy=1./self.down_sample,\n interpolation=cv2.INTER_AREA\n )\n # saturation_mask = cv2.resize(\n # saturation_mask,\n # dsize=None,\n # fx=1./self.down_sample,\n # fy=1./self.down_sample,\n # interpolation=cv2.INTER_AREA\n # )\n if self.reup_sample is not None:\n image_data = cv2.resize(\n image_data,\n dsize=None,\n fx=self.reup_sample,\n fy=self.reup_sample,\n interpolation=cv2.INTER_CUBIC\n )\n # saturation_mask = cv2.resize(\n # saturation_mask,\n # dsize=None,\n # fx=self.reup_sample,\n # fy=self.reup_sample,\n # interpolation=cv2.INTER_CUBIC\n # )\n image = torch.tensor(np.transpose(image_data, (2,0,1)), dtype=torch.float32, device=self.device)\n # saturation_mask = torch.tensor(saturation_mask, dtype=torch.float32, device=self.device)\n if not self.lazy:\n self._image = image\n # self._saturation_mask = saturation_mask\n else:\n image = self._image\n # saturation_mask = self._saturation_mask\n\n return image#, saturation_mask", "def __getitem__(self, idx):\n R = self.R\n \n image_path = os.path.join(self.image_dir, self.data['ImageId'][idx] + '.jpg')\n image = 
cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = torch.from_numpy(image)\n H, W, N = image.size() # H, W, 3\n image = image.permute(2, 0, 1).float()\n image = 2*(image/255)-1\n\n car_id, euler_angle, world_coords = from_prediction_string_to_world_coords(self.data['PredictionString'][idx])\n image_coords = from_camera_coords_to_image_coords(world_coords, self.camera_matrix)\n image_coords = torch.from_numpy(image_coords).float()\n \n target_pointmap = torch.zeros(H//R, W//R, dtype=torch.float)\n target_heatmap = None\n target_local_offset = torch.zeros(2, H//R, W//R, dtype=torch.float)\n target_depth = torch.zeros(H//R, W//R, dtype=torch.float)\n target_yaw = torch.zeros(H//R, W//R, dtype=torch.float)\n target_pitch = torch.zeros(H//R, W//R, dtype=torch.float)\n target_roll = torch.zeros(H//R, W//R, dtype=torch.float)\n h = torch.arange(0, H//R, 1, dtype=torch.float).unsqueeze(dim=1).expand(-1, W//R)\n w = torch.arange(0, W//R, 1, dtype=torch.float).unsqueeze(dim=0).expand(H//R, -1)\n p_h = image_coords[:, 1]\n p_w = image_coords[:, 0]\n \n num_object = torch.Tensor([len(car_id)])\n \n for object_id in range(int(num_object.item())):\n point = ((p_h[object_id]/R).int(), (p_w[object_id]/R).int())\n \n if point[0] < 0 or H//R - 1 < point[0] or point[1] < 0 or W//R - 1 < point[1] :\n continue\n target_pointmap[point[0], point[1]] = 1\n target_local_offset[0][point[0], point[1]] = p_h[object_id]/R - point[0].float()\n target_local_offset[1][point[0], point[1]] = p_w[object_id]/R - point[1].float()\n target_depth[point[0], point[1]] = image_coords[object_id, 2]\n target_yaw[point[0], point[1]] = torch.Tensor([euler_angle[object_id][0]])\n target_pitch[point[0], point[1]] = torch.Tensor([euler_angle[object_id][1]])\n target_roll[point[0], point[1]] = torch.Tensor([euler_angle[object_id][2]])\n \n sigma = self.coeff_sigma / target_depth[point[0], point[1]]\n exponent = - ((h-p_h[object_id]/R)**2 + (w-p_w[object_id]/R)**2)/(2 * torch.pow(sigma, 2))\n heatmap = torch.exp(exponent)\n \n if target_heatmap is None:\n target_heatmap = heatmap.unsqueeze(dim=0)\n else:\n target_heatmap = torch.cat((target_heatmap, heatmap.unsqueeze(dim=0)), dim=0)\n\n target_heatmap, _ = torch.max(target_heatmap, dim=0)\n \n if torch.cuda.is_available():\n image = image.cuda().contiguous()\n target = {'num_object': num_object.cuda(), 'pointmap': target_pointmap.cuda(), 'heatmap': target_heatmap.cuda(), 'local_offset': target_local_offset.cuda(), 'depth': target_depth.cuda(), 'yaw': target_yaw.cuda(), 'pitch': target_pitch.cuda(), 'roll': target_roll.cuda()}\n else:\n image = image.contiguous()\n target = {'num_object': num_object, 'pointmap': target_pointmap, 'heatmap': target_heatmap, 'local_offset': target_local_offset, 'depth': target_depth, 'yaw': target_yaw, 'pitch': target_pitch, 'roll': target_roll}\n \n return image, target", "def pre_cache():\n for transform, _ in self._pairs:\n world_matrix = transform[\"worldMatrix\"][0].asMatrix()\n parent_matrix = transform[\"parentMatrix\"][0].asMatrix()\n matrix = transform[\"matrix\"].asMatrix()\n translate = transform[\"translate\"].as_vector()\n rotate = transform[\"rotate\"].as_euler()\n\n if \"jointOrient\" in transform:\n joint_orient = transform[\"jointOrient\"].as_quaternion()\n else:\n # Only joints have these\n joint_orient = cmdx.Quaternion()\n\n self._cache[(transform, \"worldMatrix\")] = world_matrix\n self._cache[(transform, \"parentMatrix\")] = parent_matrix\n self._cache[(transform, \"matrix\")] = matrix\n 
self._cache[(transform, \"translate\")] = translate\n self._cache[(transform, \"rotate\")] = rotate\n self._cache[(transform, \"jointOrient\")] = joint_orient", "def imgsz(self) -> np.ndarray:\n return self._vector[6:8].astype(int)", "def LoadZVIMetaData(filename):\r\n print \"Loading zvi file metadata...\"\r\n\r\n ole = OleFileIO_PL.OleFileIO(filename)\r\n #ole.dumpdirectory()\r\n metadata=ole.extract_metadata()\r\n (channeldict,Width,Height,MosaicSizeX,MosaicSizeY,ScaleFactorX,ScaleFactorY,\\\r\n channels,XPositions,YPositions,FocusPositions,XCoors,YCoors,ExposureTimes)=metadata\r\n Xpos=np.array(XPositions);\r\n Ypos=np.array(YPositions);\r\n\r\n extent=[Xpos.min()-(Width/2)*ScaleFactorX,Xpos.max()+(Width/2)*ScaleFactorX,\\\r\n Ypos.max()+(Height/2)*ScaleFactorY,Ypos.min()-(Height/2)*ScaleFactorY]\r\n \r\n return extent", "def set_recording_vectors(self, i):\n #soma_v_vec = h.Vector() # Membrane potential vector at soma\n setattr(self, 'soma_v_vec_' + str(i), h.Vector() )\n #dend_v_vec = h.Vector() # Membrane potential vector at dendrite\n setattr(self, 'dend_v_vec_' + str(i), h.Vector() )\n \n getattr(self, 'soma_v_vec_' + str(i)).record(self.cells[i].soma(0.5)._ref_v)\n neuron_utils.createStateVariable(id='v_vec_soma_' + str(i) , name='v_vec_soma_' + str(i) ,\n units='mV', python_variable={\"record_variable\": getattr(self, 'soma_v_vec_' + str(i)),\n \"segment\": self.cells[i].soma(0.5)})\n\n getattr(self, 'dend_v_vec_' + str(i)).record(self.cells[i].dend(0.5)._ref_v)\n neuron_utils.createStateVariable(id='v_vec_dend_' + str(i) , name='v_vec_dend_' + str(i) ,\n units='mV', python_variable={\"record_variable\": getattr(self, 'dend_v_vec_' + str(i)),\n \"segment\": self.cells[i].dend(0.5)})", "def _get_observation(self, unseen=False):\n img_arr = p.getCameraImage(width=self._width,\n height=self._height,\n viewMatrix=self._view_matrix,\n projectionMatrix=self._proj_matrix)\n rgb = img_arr[2]\n depth = img_arr[3]\n min = 0.97\n max=1.0\n segmentation = img_arr[4]\n depth = np.reshape(depth, (self._height, self._width,1) )\n segmentation = np.reshape(segmentation, (self._height, self._width,1) )\n\n np_img_arr = np.reshape(rgb, (self._height, self._width, 4))\n np_img_arr = np_img_arr[:, :, :3].astype(np.float64)\n\n view_mat = np.asarray(self._view_matrix).reshape(4, 4)\n proj_mat = np.asarray(self._proj_matrix).reshape(4, 4)\n # pos = np.reshape(np.asarray(list(p.getBasePositionAndOrientation(self._objectUids[0])[0])+[1]), (4, 1))\n\n AABBs = np.zeros((len(self._objectUids), 2, 3))\n cls_ls = []\n \n for i, (_uid, _cls) in enumerate(zip(self._objectUids, self._objectClasses)):\n AABBs[i] = np.asarray(p.getAABB(_uid)).reshape(2, 3)\n cls_ls.append(NAME2IDX[_cls])\n\n # np.save('/home/tony/Desktop/obj_save/view_mat_'+str(self.img_save_cnt), view_mat)\n # np.save('/home/tony/Desktop/obj_save/proj_mat_'+str(self.img_save_cnt), proj_mat)\n # np.save('/home/tony/Desktop/obj_save/img_'+str(self.img_save_cnt), np_img_arr.astype(np.int16))\n # np.save('/home/tony/Desktop/obj_save/AABB_'+str(self.img_save_cnt), AABBs)\n # np.save('/home/tony/Desktop/obj_save/class_'+str(self.img_save_cnt), np.array(cls_ls))\n\n np.save(OUTPUT_DIR + '/image_' + str(self.img_save_cnt), np_img_arr.astype(np.int16))\n dets = np.zeros((AABBs.shape[0], 5))\n for i in range(AABBs.shape[0]):\n dets[i, :4] = self.get_2d_bbox(AABBs[i], view_mat, proj_mat, IM_HEIGHT, IM_WIDTH)\n dets[i, 4] = int(cls_ls[i])\n np.save(OUTPUT_DIR + '/annotation_'+str(self.img_save_cnt), dets)\n\n test = np.concatenate([np_img_arr[:, :, 0:2], 
segmentation], axis=-1)\n\n return test", "def set_camera_intrinsics(vtk_renderer,\n vtk_camera,\n width,\n height,\n f_x,\n f_y,\n c_x,\n c_y,\n near,\n far\n ):\n # pylint: disable=line-too-long\n tiled_aspect_ratio = vtk_renderer.GetTiledAspectRatio()\n\n # This is what we are calling the 'correct' matrix.\n # But we don't use it directly, meaning, we do NOT do:\n # vtk_camera.UseExplicitProjectionTransformMatrixOn()\n # vtk_camera.SetExplicitProjectionTransformMatrix(vtk_opengl)\n # Setting such an explicit projection matrix, stops vtkWindowToImageFilter\n # working, as reported here:\n # https://gitlab.kitware.com/vtk/vtk/-/issues/17520#note_776406\n vtk_opengl = compute_projection_matrix(width,\n height,\n f_x,\n f_y,\n c_x,\n c_y,\n near,\n far)\n\n # So, we have to coerce the normal vtkCamera parameters\n # to mimic such a matrix.\n\n # These come from: `benoitrosa <https://gist.github.com/benoitrosa/ffdb96eae376503dba5ee56f28fa0943>`_\n vtk_camera.SetClippingRange(near, far)\n\n wcx = -2.0 * (c_x - width / 2.0) / width\n wcy = 2.0 * (c_y - height / 2.0) / height\n\n vtk_camera.SetWindowCenter(wcx, wcy)\n\n # Set vertical view angle as an indirect way of setting the y focal distance\n angle = 180 / np.pi * 2.0 * np.arctan2(height / 2.0, f_y)\n vtk_camera.SetViewAngle(angle)\n\n # But after benoitrosa's method, the aspect/shear is still not right.\n # Remember:\n # Actual Projection Matrix = UserTransform * Projection * Shear * View\n\n # Set Identity/Default shear and UserTransform.\n vtk_user_mat = vtk.vtkMatrix4x4()\n vtk_user_mat.Identity()\n vtk_user_trans = vtk.vtkTransform()\n vtk_user_trans.SetMatrix(vtk_user_mat)\n vtk_camera.SetUserTransform(vtk_user_trans)\n vtk_camera.SetViewShear(0, 0, 0)\n\n # Retrieve the ProjectionTransformMatrix (which includes Shear/UserTransform)\n vtk_proj = vtk_camera.GetProjectionTransformMatrix(tiled_aspect_ratio, -1, 1)\n\n # Calculate aspect and shear, to fixup the projection matrix.\n aspect = vtk_opengl.GetElement(0, 0) / vtk_proj.GetElement(0, 0)\n shear = (aspect * vtk_proj.GetElement(0, 2) - vtk_opengl.GetElement(0, 2)) / (aspect * vtk_proj.GetElement(0, 0))\n\n # Now set them into the VTK matrices\n vtk_user_mat.SetElement(0, 0, aspect)\n vtk_user_trans.SetMatrix(vtk_user_mat)\n vtk_camera.SetUserTransform(vtk_user_trans)\n vtk_camera.SetViewShear(shear, 0, 0)\n vtk_camera.Modified()\n\n # This should now match the OpenGL matrix.\n vtk_proj = vtk_camera.GetProjectionTransformMatrix(tiled_aspect_ratio, -1, 1)\n\n # Return them both, just so calling clients can compare.\n return vtk_opengl, vtk_proj", "def get_camera_metadata(self):\n return self.camera.getHeight(), self.camera.getWidth(), 4 # channels", "def loadCameraCalibration(self, file_name=None):\n\n mat_str = []\n if file_name == None:\n file_str = \"/home/student/armlab-w20/util/calibration.cfg\"\n else:\n file_str = file_name\n with open(file_str, 'r') as f:\n for line in f:\n line = line.replace('[', '')\n line = line.replace(']', '')\n line = line.replace('\\n', '')\n mat_str.append(line)\n cam_mat_str = mat_str[1:4]\n dist_coeffs = mat_str[-2:]\n dist_coeffs = \"\".join(dist_coeffs)\n dist_coeffs = dist_coeffs.split()\n dist_coeffs = [float(coeff) for coeff in dist_coeffs]\n self.cam_int_mat = []\n for row in cam_mat_str:\n mat_row = []\n mat_row = row.split()\n mat_row = [float(i) for i in mat_row]\n self.cam_int_mat.append(mat_row)\n self.cam_int_mat = np.asarray(self.cam_int_mat)\n self.dist_coeffs = np.asarray(dist_coeffs)", "def get_image(self):\n self.flush_buffer()\n 
_, frame = self.cam.read()\n shift_frame = self.perspective_shift(frame)\n #shift_frame = None\n return frame, shift_frame", "def get_intrinsics(pipeline, stream=rs.stream.color):\n streams = [stream_ for stream_ in pipeline.get_active_profile().get_streams() if stream_.stream_type() == stream]\n intrinsics = None\n if streams:\n intrinsics = streams[0].as_video_stream_profile().get_intrinsics()\n return intrinsics", "def generate_transformations(self):\n if self.perform_aug:\n print(\"\\nAugmentation will be applied to the training images\")\n data_transforms = {\n \"train\": transforms.Compose([\n transforms.Resize((self.input_size, self.input_size)),\n transforms.RandomRotation(degrees=45),\n transforms.ColorJitter(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]),\n \"val\": transforms.Compose([\n transforms.Resize((self.input_size, self.input_size)), # 256 used to be\n transforms.CenterCrop(self.input_size),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n }\n else:\n print(\"\\nNo augmentation will be applied to the training images\")\n data_transforms = {\n \"train\": transforms.Compose([\n transforms.Resize((self.input_size, self.input_size)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]),\n \"val\": transforms.Compose([\n transforms.Resize((self.input_size, self.input_size)), # 256 used to be\n transforms.CenterCrop(self.input_size),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n }\n\n return data_transforms", "def get_exif_data(self):\n self.exif_data = piexif.load(self.img_path)\n return self.exif_data", "def update_image(self):\n if self.filenames:\n pos = self.slider.value()\n proj, flat, dark, theta = dx.read_aps_32id(self.filenames, proj=(pos, pos+1))\n if self.ffc_correction:\n image = proj[0,:,:].astype(np.float)/flat[0,:,:].astype(np.float)\n else:\n image = proj[0,:,:].astype(np.float)\n self.image_item.setImage(image)", "def transformix_reg(output_dir, image_path):\n # start time\n start_time = time.time()\n \n # read parameter files\n trans_dir_inv = output_dir +'TransformParameters.inv.txt'\n trans_inv = sitk.ReadParameterFile(trans_dir_inv)\n \n trans_dir_fwd = output_dir +'TransformParameters.fwd.txt'\n trans_fwd = sitk.ReadParameterFile(trans_dir_fwd)\n\n # load image\n image = sitk.ReadImage(image_path)\n\n # initialize transformix\n transformixImageFilter = sitk.TransformixImageFilter()\n \n # set moving image\n transformixImageFilter.SetMovingImage(image)\n \n # set parameter\n transformixImageFilter.SetTransformParameterMap(trans_fwd)\n transformixImageFilter.AddTransformParameterMap(trans_inv)\n \n # set options, compute deformation field and Jac det\n transformixImageFilter.ComputeDeformationFieldOn()\n transformixImageFilter.ComputeDeterminantOfSpatialJacobianOn()\n \n # set output dir\n transformixImageFilter.SetOutputDirectory(output_dir)\n\n # run\n transformixImageFilter.Execute()\n\n # get results, jacobian det doesn't have a function, deformation field cannot be accessed\n tranResultImage = transformixImageFilter.GetResultImage()\n #JacDet = sitk.ReadImage(output_dir + \"spatialJacobian.nii\")\n \n # save results, rename spatialJacobian for consistancy\n sitk.WriteImage(tranResultImage, output_dir + \"result_4d.nii\")\n os.rename(output_dir + \"spatialJacobian.nii\", output_dir + \"JacDet_4d.nii\")\n\n # display registered image\n tranResultImage_np = 
sitk.GetArrayFromImage(tranResultImage)\n #pl.ImagePlot(tranResultImage_np, x = -1, y = -3)\n \n # remove unnecessary files\n for file in glob.glob(output_dir + 'TransformParameters*'):\n os.remove(file)\n \n # caclulate elapsed time\n end_time = time.time()\n elapsed_time = end_time - start_time\n print('transformix_reg done. elapsed time:', elapsed_time, 's')", "def get_image_and_vectorize(self, file_path):\r\n img = np.array(Image.open(file_path).convert('1'))\r\n img_vectorize = img.reshape(28 * 28)\r\n return img_vectorize", "def get_img():\n\timg = camera.Capture()\n\tarray = jetson.utils.cudaToNumpy(img)\n\n\treturn(array)", "def pose2mat(pose):\n extrinsic = torch.eye(4)\n extrinsic[:3, :] = pose[:, :4]\n inv_extrinsic = torch.inverse(extrinsic)\n extrinsic = torch.inverse(inv_extrinsic)\n h, w, focal_length = pose[:, 4]\n intrinsic = torch.Tensor([[focal_length, 0, w/2],\n [0, focal_length, h/2],\n [0, 0, 1]])\n\n return extrinsic, intrinsic", "def store_sf_img_metrics(self):\n logger.info('Storing iso image metrics')\n rows = list(self._metrics_table_row_gen(self.job_id, self.sf_db_id,\n self.sf_metrics_df, self.sf_adduct_peaksn,\n self.metrics))\n self.db.insert(METRICS_INS, rows)", "def get_raw_data(self):\n if self._img and self.is_4d():\n temp = self._img.get_data(caching='unchanged')\n temp = np.rot90(temp)\n for tp in self._loaded_time_list:\n temp[..., tp] = self._data[..., tp]\n else:\n temp = self._data.copy()\n\n return np.rot90(temp, 3)", "def associate_files(self):\n self.MatlabFiles = {'defaults': os.path.join(self.ParentDir,'defaults.m'),\n 'avevel': os.path.join(self.OutDir, 'pix2avevel.mat'),\n 'cumdef': os.path.join(self.OutDir, 'pix2cumdef.mat'),\n 'variance': os.path.join(self.OutDir, 'vaiance.mat')}", "def test_intrinsics_constructor() -> None:\n fx_px, fy_px = 1000, 1001\n\n width_px = 2048\n height_px = 1550\n\n cx_px, cy_px = 1024, 775\n\n intrinsics = Intrinsics(\n fx_px=fx_px,\n fy_px=fy_px,\n cx_px=cx_px,\n cy_px=cy_px,\n width_px=width_px,\n height_px=height_px,\n )\n K_expected: NDArrayFloat = np.array(\n ([1000, 0, 1024], [0, 1001, 775], [0, 0, 1]), dtype=np.float64\n )\n assert np.array_equal(intrinsics.K, K_expected)", "def setup_camera(self) -> None:\n self.world.camera.update(\n cam_base_pos=(0, -3, 0),\n cam_dist=1.2*self.world.env_dim,\n cam_yaw=0,\n cam_pitch=-60\n )", "def __init__(self, at=(0, 0, 0), eye=(0, 0, -0.1), lens=None,\r\n is_3d=True, scale=1.0):\r\n super(Camera, self).__init__()\r\n\r\n self.at = at\r\n self.start_eye = eye # for reset with different lens settings\r\n self.eye = [eye[0], eye[1], eye[2]]\r\n if lens == None:\r\n from pi3d.Display import Display\r\n lens = [Display.INSTANCE.near, Display.INSTANCE.far, Display.INSTANCE.fov,\r\n Display.INSTANCE.width / float(Display.INSTANCE.height)]\r\n self.lens = lens\r\n self.view = _LookAtMatrix(at, eye, [0, 1, 0])\r\n if is_3d:\r\n self.projection = _ProjectionMatrix(lens[0], lens[1], lens[2] / scale, lens[3])\r\n else:\r\n self.projection = _OrthographicMatrix(scale=scale)\r\n self.model_view = dot(self.view, self.projection)\r\n # Apply transform/rotation first, then shift into perspective space.\r\n self.mtrx = array(self.model_view, copy=True)\r\n # self.L_reflect = _LookAtMatrix(at,eye,[0,1,0],reflect=True)\r\n self.rtn = [0.0, 0.0, 0.0]\r\n\r\n self.was_moved = True", "def add_base64_files(self, file_dict):\n if self.src_keys is None:\n self.src_keys, self.rgb_cam_list, self.rgb_of_depth_cam_list = init_cam_set(file_dict)\n self.src_keys_dict = {v: i for i, v in 
enumerate(self.src_keys)}\n logger.info('Init Calibrator done.')\n for k, v in file_dict.items():\n filename = str(10000000 + self.counter)[1:]\n if k.startswith('cam'):\n if 'dept' in k:\n continue\n cam_id = self.src_keys_dict[k]\n dst_path = os.path.join(self.calib_data_dir, str(\n cam_id), 'cam0', filename + '.jpg')\n if not os.path.exists(os.path.dirname(dst_path)):\n os.makedirs(os.path.dirname(dst_path))\n # print('calib data copy', v, dst_path)\n # print >> sys.stderr, 'calib data copy', v, dst_path\n with open(dst_path, 'w') as fout:\n fout.write(base64.b64decode(v))\n elif k.startswith('xeye'):\n for i, imgb64 in enumerate(v):\n cam_id = self.src_keys_dict[('xeye_image', i)]\n dst_path = os.path.join(self.calib_data_dir, str(\n cam_id), 'cam0', filename + '.png')\n if not os.path.exists(os.path.dirname(dst_path)):\n os.makedirs(os.path.dirname(dst_path))\n if self.resize_xeye:\n with open(self.record_path, 'a') as fout:\n fout.write('resize ' + imgb64 + ' ' + dst_path + '\\n')\n # resize_xeye_image_file(imgpath, dst_path)\n else:\n with open(dst_path, 'w') as fout:\n fout.write(base64.b64decode(imgb64))\n else:\n logger.warn('Unrocognize key: {}'.format(k))\n return\n self.counter += 1", "def __enter__(self):\r\n\r\n\t\t#Variables\r\n\t\thCam = ueye.HIDS(0)\t\t\t #0: first available camera; 1-254: The camera with the specified camera ID\r\n\t\tsInfo = ueye.SENSORINFO()\r\n\t\tcInfo = ueye.CAMINFO()\r\n\t\tpcImageMemory = ueye.c_mem_p()\r\n\t\tMemID = ueye.int()\r\n\t\trectAOI = ueye.IS_RECT(1280,1024)\r\n\t\tpitch = ueye.INT()\r\n\t\tnBitsPerPixel = ueye.INT(24)\t#24: bits per pixel for color mode; take 8 bits per pixel for monochrome\r\n\t\tchannels = 3\t\t\t\t\t#3: channels for color mode(RGB); take 1 channel for monochrome\r\n\t\tm_nColorMode = ueye.INT(1)\t\t# Y8/RGB16/RGB24/REG32\r\n\t\tbytes_per_pixel = int(nBitsPerPixel / 8)\r\n\r\n\t\t# Starts the driver and establishes the connection to the camera\r\n\t\tnRet = ueye.is_InitCamera(hCam, None)\r\n\t\tif nRet != ueye.IS_SUCCESS:\r\n\t\t\traise Exception(\"is_InitCamera ERROR\")\r\n\r\n\t\t# Reads out the data hard-coded in the non-volatile camera memory and writes it to the data structure that cInfo points to\r\n\t\tnRet = ueye.is_GetCameraInfo(hCam, cInfo)\r\n\t\tif nRet != ueye.IS_SUCCESS:\r\n\t\t\traise Exception(\"is_GetCameraInfo ERROR\")\r\n\r\n\t\t# You can query additional information about the sensor type used in the camera\r\n\t\tnRet = ueye.is_GetSensorInfo(hCam, sInfo)\r\n\t\tif nRet != ueye.IS_SUCCESS:\r\n\t\t\traise Exception(\"is_GetSensorInfo ERROR\")\r\n\r\n\t\tnRet = ueye.is_ResetToDefault( hCam)\r\n\t\tif nRet != ueye.IS_SUCCESS:\r\n\t\t\traise Exception(\"is_ResetToDefault ERROR\")\r\n\r\n\t\t# Set display mode to DIB\r\n\t\tnRet = ueye.is_SetDisplayMode(hCam, ueye.IS_SET_DM_DIB)\r\n\r\n\t\t# Set the right color mode\r\n\t\tif int.from_bytes(sInfo.nColorMode.value, byteorder='big') == ueye.IS_COLORMODE_BAYER:\r\n\t\t\t# setup the color depth to the current windows setting\r\n\t\t\tueye.is_GetColorDepth(hCam, nBitsPerPixel, m_nColorMode)\r\n\t\t\tbytes_per_pixel = int(nBitsPerPixel / 8)\r\n\r\n\t\telif int.from_bytes(sInfo.nColorMode.value, byteorder='big') == ueye.IS_COLORMODE_CBYCRY:\r\n\t\t\t# for color camera models use RGB32 mode\r\n\t\t\tm_nColorMode = ueye.IS_CM_BGRA8_PACKED\r\n\t\t\tnBitsPerPixel = ueye.INT(32)\r\n\t\t\tbytes_per_pixel = int(nBitsPerPixel / 8)\r\n\r\n\t\telif int.from_bytes(sInfo.nColorMode.value, byteorder='big') == ueye.IS_COLORMODE_MONOCHROME:\r\n\t\t\t# for color camera models 
use RGB32 mode\r\n\t\t\tm_nColorMode = ueye.IS_CM_MONO8\r\n\t\t\tnBitsPerPixel = ueye.INT(8)\r\n\t\t\tbytes_per_pixel = int(nBitsPerPixel / 8)\r\n\t\t\r\n\t\telse:\r\n\t\t\t# for monochrome camera models use Y8 mode\r\n\t\t\tm_nColorMode = ueye.IS_CM_MONO8\r\n\t\t\tnBitsPerPixel = ueye.INT(8)\r\n\t\t\tbytes_per_pixel = int(nBitsPerPixel / 8)\r\n\r\n\t\t# Can be used to set the size and position of an \"area of interest\"(AOI) within an image\r\n\t\tnRet = ueye.is_AOI(hCam, ueye.IS_AOI_IMAGE_GET_AOI, rectAOI, ueye.sizeof(rectAOI))\r\n\t\tif nRet != ueye.IS_SUCCESS:\r\n\t\t\traise Exception(\"is_AOI ERROR\")\r\n\r\n\t\twidth = rectAOI.s32Width\r\n\t\theight = rectAOI.s32Height\r\n\r\n\t\t# Allocates an image memory for an image having its dimensions defined by width and height and its color depth defined by nBitsPerPixel\r\n\t\tnRet = ueye.is_AllocImageMem(hCam, width, height, nBitsPerPixel, pcImageMemory, MemID)\r\n\t\tif nRet != ueye.IS_SUCCESS:\r\n\t\t\traise Exception(\"is_AllocImageMem ERROR\")\r\n\t\telse:\r\n\t\t\t# Makes the specified image memory the active memory\r\n\t\t\tnRet = ueye.is_SetImageMem(hCam, pcImageMemory, MemID)\r\n\t\t\tif nRet != ueye.IS_SUCCESS:\r\n\t\t\t\traise Exception(\"is_SetImageMem ERROR\")\r\n\t\t\telse:\r\n\t\t\t\t# Set the desired color mode\r\n\t\t\t\tnRet = ueye.is_SetColorMode(hCam, m_nColorMode)\r\n\r\n\r\n\r\n\t\t# Activates the camera's live video mode (free run mode)\r\n\t\tnRet = ueye.is_CaptureVideo(hCam, ueye.IS_DONT_WAIT)\r\n\t\tif nRet != ueye.IS_SUCCESS:\r\n\t\t\traise Exception(\"is_CaptureVideo ERROR\")\r\n\r\n\t\t# Enables the queue mode for existing image memory sequences\r\n\t\tnRet = ueye.is_InquireImageMem(hCam, pcImageMemory, MemID, width, height, nBitsPerPixel, pitch)\r\n\t\tif nRet != ueye.IS_SUCCESS:\r\n\t\t\traise Exception(\"is_InquireImageMem ERROR\")\r\n\r\n\t\t# set attributes\r\n\t\tself.pcImageMemory = pcImageMemory\r\n\t\tself.width = width\r\n\t\tself.height = height\r\n\t\tself.nBitsPerPixel = nBitsPerPixel\r\n\t\tself.pitch = pitch\r\n\t\tself.ueye = ueye\r\n\t\tself.bytes_per_pixel = bytes_per_pixel\r\n\t\tself.hCam = hCam\r\n\t\treturn self", "def write_mir(self, filename):\n raise NotImplementedError", "def parse(self, calibration_px=1.0):\n self.isParsingNeeded = False\n self.meta_data = {}\n self.data = []\n #CZI files\n if self.extend == '.czi':\n with czifile.CziFile(self.file_path) as czi:\n data = czi.asarray()\n Header_Metadata = str(czi).split('<ImageDocument>')\n string = '<ImageDocument>'+Header_Metadata[1]\n #print(string.strip(\"'\"))\n metadata = XMLET.fromstring(string.strip(\"'\"))\n try:\n #Query XML fore the metadata for picture shape(X;Y;Z-stacks).\n #Picture Shape.\n shapes = metadata.findall('./Metadata/Information/Image')[0]\n self.meta_data[\"ShapeSizeX\"] = int(shapes.findall('SizeX')[0].text)\n self.meta_data[\"ShapeSizeY\"] = int(shapes.findall('SizeY')[0].text)\n try:\n self.meta_data[\"ShapeSizeZ\"] = int(shapes.findall('SizeZ')[0].text)\n except:\n self.meta_data[\"ShapeSizeZ\"] = 1\n #Get the hyperstack dimension if the image is a hyperstack.\n try:\n self.meta_data[\"ShapeSizeC\"] = int(shapes.findall('SizeC')[0].text)\n except:\n self.meta_data[\"ShapeSizeC\"] = 1\n print(\"No info of color channels 1 assumed\")\n #Get physical pixel size of image(nm/px) convert to(µm/px).\n PixelSizes = metadata.findall('./Metadata/Scaling/Items/Distance')\n self.meta_data['SizeX'] = float(PixelSizes[0].findall('Value')[0].text)*10**6\n self.meta_data['SizeY'] = 
float(PixelSizes[1].findall('Value')[0].text)*10**6\n self.meta_data['SizeZ'] = float(PixelSizes[2].findall('Value')[0].text)*10**6\n except(ValueError):\n print (\"Metadata fail\")\n\n #Tiff files.\n #Tiff files are problematic because they most likely wont contain the necessary metadata.\n #Try to get the shape info over common dimensions.\n elif self.extend == '.tif':\n with tifffile.TiffFile(self.file_path) as tif:\n data = tif.asarray()\n for shape in data.shape:\n if shape <5:\n self.meta_data[\"ShapeSizeC\"] = shape\n elif shape <40:\n self.meta_data[\"ShapeSizeZ\"] = shape\n else:\n self.meta_data[\"ShapeSizeY\"] = shape\n self.meta_data[\"ShapeSizeX\"] = shape\n\n #Read Lsm Files.\n elif self.extend == '.lsm':\n with tifffile.TiffFile(self.file_path) as tif:\n data = tif.asarray(memmap=True)\n headerMetadata = str(tif.pages[0].cz_lsm_scan_info)\n metadataList = headerMetadata.split(\"\\n*\")\n #Get image shape from lsm header SizeC=0 if not given.\n for shapes in metadataList:\n if \"images_height\" in shapes:\n self.meta_data[\"ShapeSizeX\"]= int(shapes.split()[-1])\n if \"images_width\" in shapes:\n self.meta_data[\"ShapeSizeY\"]= int(shapes.split()[-1])\n if \"images_number_planes\" in shapes:\n self.meta_data[\"ShapeSizeZ\"]= int(shapes.split()[-1])\n if \"images_number_channels\" in shapes:\n self.meta_data[\"ShapeSizeC\"]= int(shapes.split()[-1])\n #Get physical pixel size of image(nm/px) convert to(µm/px).\n data = np.swapaxes(data,1,2)\n lsm_header = str(tif.pages[0].tags.cz_lsm_info)\n LsmInfo = lsm_header.split(\", \")\n i = 0\n #Query for pixel size.\n for element in LsmInfo:\n if \"e-0\" in element:\n i += 1\n if i == 1:\n self.meta_data['SizeX'] = (float(element)*10**6)\n if i == 2:\n self.meta_data['SizeY'] = (float(element)*10**6)\n if i == 3:\n self.meta_data['SizeZ'] = (float(element)*10**6)\n\n elif self.extend == \".png\":\n data = misc.imread(self.file_path)\n data = np.expand_dims(np.expand_dims(data[...,0],0),0)\n self.meta_data[\"ShapeSizeC\"] = 1\n self.meta_data[\"ShapeSizeZ\"] = 1\n self.meta_data[\"ShapeSizeX\"] = data.shape[2]\n self.meta_data[\"ShapeSizeY\"] = data.shape[3]\n self.meta_data[\"SizeZ\"] = 1\n self.meta_data[\"SizeX\"] = 0.01\n self.meta_data[\"SizeY\"] = 0.01\n #Bring all formats in the same shape.\n self.data = np.reshape(data,(self.meta_data[\"ShapeSizeC\"],self.meta_data[\"ShapeSizeZ\"],self.meta_data[\"ShapeSizeX\"],self.meta_data[\"ShapeSizeY\"]))\n self.meta_data['ChannelNum'] = self.meta_data[\"ShapeSizeC\"]\n #Set pixel size to manuell value if there are no metadata.\n if self.meta_data == {}:\n self.set_calibration(calibration_px)\n #Set the box for manuel calibration to the actuell pixel size.", "def save_image_microscope_camera(self, filename: str) -> None:\n filename = self.get_filename(filename)\n t0 = time.time()\n temp_image = self.camera_microscope.temp_image\n while temp_image is None:\n temp_image = self.camera_fiber.temp_image\n if time.time() - t0 > 10:\n raise CameraTimeout(\"It took too long to get a new frame from the microscope\")\n np.save(filename, temp_image)\n self.logger.info(f\"Saved microscope data to {filename}\")", "def write_camera_pose_to_file(camera_pose_abs_dict: dict, pose_dir_path: str) -> None:\n image_dst = path.join(pose_dir_path, 'images.txt')\n with open(image_dst, 'w+') as file:\n file.write('# Image list with two lines of data per image:\\n')\n file.write('# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\\n')\n file.write('# POINTS2D[] as (X, Y, POINT3D_ID)\\n')\n file.write(f'# Number 
of images: {len(camera_pose_abs_dict.keys())}\\n')\n\n # write each camera pose to file\n for image in camera_pose_abs_dict.keys():\n image_pose_data = []\n t_vector = camera_pose_abs_dict[image][1]\n qx, qy, qz, qw = rotation_matrix_to_quaternion(camera_pose_abs_dict[image][0])\n\n image_pose_data.append(str(image))\n # image_pose_data.append(f'{qw} {qx} {qy} {qz}')\n image_pose_data.append(f'{qz} {qy} {qx} {qw}')\n image_pose_data.append(' '.join(map(str, t_vector)))\n image_pose_data.append('1')\n image_pose_data.append(f'image{image}.jpg')\n\n file.write(' '.join(image_pose_data) + '\\n\\n')", "def adjust_camera(self):\n pose = deepcopy(self.data['poses']['marker']) # PoseStamped()\n eye_pose = deepcopy(pose)\n eye_pose.pose.position.x += 0.60\n eye_pose.pose.position.z += 0.20\n focus_pose = PoseStamped()\n base_eye_pose = PoseStamped()\n\n try:\n # Convert pose to base frame\n pose.header.stamp = self.tfl. \\\n getLatestCommonTime(self.params['world'], pose.header.frame_id)\n focus_pose = self.tfl.transformPose(self.params['world'], pose)\n except (TfE, LookupException, ConnectivityException):\n Me.error_message(\"Error transforming pose \" + pose.header.frame_id)\n\n try:\n # Convert pose to base frame\n pose.header.stamp = self.tfl. \\\n getLatestCommonTime(self.params['world'],\n eye_pose.header.frame_id)\n base_eye_pose = self.tfl.transformPose(self.params['world'],\n eye_pose)\n except (TfE, LookupException, ConnectivityException):\n Me.error_message(\"Error transforming pose \" + pose.header.frame_id)\n\n cam_place = CameraPlacement()\n cam_place.target_frame = self.params['world']\n cam_place.time_from_start = Duration(1)\n # Position of the camera relative to target_frame\n cam_place.eye.header.frame_id = cam_place.target_frame\n cam_place.eye.point = base_eye_pose.pose.position\n # Target_frame-relative point for the focus\n cam_place.focus.header.frame_id = cam_place.target_frame\n cam_place.focus.point = focus_pose.pose.position\n # Target_frame-relative vector that maps to \"up\" in the view plane.\n cam_place.up.header.frame_id = cam_place.target_frame\n cam_place.up.vector.x = 0\n cam_place.up.vector.y = 0\n cam_place.up.vector.z = 1\n self.pub.publish(cam_place)\n return", "def camera(self):\n return self.__camera", "def imageTransform(self):\n ims = self.imageShape\n acs = self.activeShape\n dx = self.colVector\n dy = self.rowVector\n\n p0 = self.activeOrigin\n p1 = p0 + acs[2] * dx\n p2 = p0 + acs[1] * dy\n\n # print p0, p1, p2\n # print acs, dx, dy\n\n localPts = list(map(pg.Vector, [[0,0], [ims[2],0], [0,ims[1]], [0,0,1]])) # w and h of data of image in pixels.\n globalPts = list(map(pg.Vector, [p0, p1, p2, [0,0,1]]))\n m = pg.solve3DTransform(localPts, globalPts)\n m[:,2] = m[:,3]\n m[2] = m[3]\n m[2,2] = 1\n tr = Qt.QTransform(*m[:3,:3].transpose().reshape(9))\n return tr", "def data_vector(self) -> np.ndarray:\r\n return np.dot(\r\n self.linear_obj_list[0].mapping_matrix.T, self.w_tilde.dirty_image\r\n )", "def read_calib_file(self, velo_to_cam, cam_to_cam):\n data = {}\n data2 = {}\n\n data_new = {}\n\n \n with open(cam_to_cam, 'r') as f:\n for line in f.readlines():\n line = line.rstrip()\n if len(line) == 0: continue\n key, value = line.split(':', 1)\n # The only non-float values in these files are dates, which\n # we don't care about anyway\n try:\n data[key] = np.array([float(x) for x in value.split()])\n except ValueError:\n pass\n \n with open(velo_to_cam, 'r') as f:\n for line in f.readlines():\n line = line.rstrip()\n if len(line) == 0: continue\n key, 
value = line.split(':', 1)\n # The only non-float values in these files are dates, which\n # we don't care about anyway\n try:\n data2[key] = np.array([float(x) for x in value.split()])\n except ValueError:\n pass\n \"\"\"\n data3 = {}\n with open(imu_to_velo, 'r') as f:\n for line in f.readlines():\n line = line.rstrip()\n if len(line) == 0: continue\n key, value = line.split(':', 1)\n # The only non-float values in these files are dates, which\n # we don't care about anyway\n try:\n data3[key] = np.array([float(x) for x in value.split()])\n except ValueError:\n pass\n \"\"\"\n insert = np.insert(data2['R'], [3], data2['T'][0]) \n insert = np.insert(insert, [7], data2['T'][1]) \n Tr_velo_to_cam = np.insert(insert, [11], data2['T'][2]) \n # insert = np.insert(data3['R'], [3], data3['T'][0]) \n # insert = np.insert(insert, [7], data3['T'][1]) \n # Tr_imu_to_velo = np.insert(insert, [11], data3['T'][2]) \n # data_new['Tr_imu_to_velo'] = Tr_imu_to_velo\n data_new['Tr_velo_to_cam'] = Tr_velo_to_cam\n data_new['P0'] = data['P_rect_00']\n data_new['P1'] = data['P_rect_01']\n data_new['P2'] = data['P_rect_02']\n data_new['P3'] = data['P_rect_03']\n data_new['R0_rect'] = data['R_rect_00']\n return data_new", "def prepare_data(cameras, frame_points_3d, frame_points_2d, keyframe_idx):\n camera_params = np.empty((0, 9))\n for c in cameras:\n R, _ = cv2.Rodrigues(c.R_mat)\n camera = build_camera(R, c.t)\n camera_params = np.append(camera_params, [camera], axis=0)\n\n camera_indices = []\n point_indices = []\n points_2d = np.empty((0, 2))\n points_3d = np.empty((0, 3))\n\n camera_id = 0\n pt_id_counter = 0\n for k, pts_2d in enumerate(frame_points_2d):\n if k > 0:\n halfway_idx = keyframe_idx[k] - keyframe_idx[k - 1] - 1\n points_2d = np.vstack((points_2d, frame_points_2d[k-1][halfway_idx]))\n points_3d = np.vstack((points_3d, frame_points_3d[k-1][halfway_idx]))\n camera_indices += [camera_id for _ in range(len(frame_points_2d[k-1][halfway_idx]))]\n point_indices += [i for i in range(pt_id_counter, pt_id_counter + len(frame_points_2d[k-1][halfway_idx]))]\n pt_id_counter = pt_id_counter + len(frame_points_2d[k-1][halfway_idx])\n\n if k > 1:\n end_idx = keyframe_idx[k + 1] - keyframe_idx[k - 1] - 3\n points_2d = np.vstack((points_2d, frame_points_2d[k-2][end_idx]))\n points_3d = np.vstack((points_3d, frame_points_3d[k-2][end_idx]))\n camera_indices += [camera_id for _ in range(len(frame_points_2d[k-2][end_idx]))]\n point_indices += [i for i in range(pt_id_counter, pt_id_counter + len(frame_points_2d[k-2][end_idx]))]\n pt_id_counter = pt_id_counter + len(frame_points_2d[k-2][end_idx])\n\n points_2d = np.vstack((points_2d, frame_points_2d[k][0]))\n points_3d = np.vstack((points_3d, frame_points_3d[k][0]))\n camera_indices += [camera_id for _ in range(pts_2d.shape[1])]\n point_indices += [i for i in range(pt_id_counter, pt_id_counter + pts_2d.shape[1])]\n\n camera_id += 1\n pt_id_counter = pt_id_counter + pts_2d.shape[1]\n\n return camera_params, np.asarray(camera_indices), np.asarray(point_indices), points_3d, points_2d", "def project_onto_image(self, points_in_camera_frame: ARRAY_LIKE, image: int = 0,\n temperature: Real = 0) -> np.ndarray:\n\n _, __, picture_locations = self.get_projections(points_in_camera_frame, image, temperature=temperature)\n\n return picture_locations" ]
[ "0.6322945", "0.597689", "0.58319306", "0.5828599", "0.57685983", "0.5751193", "0.57245207", "0.5624981", "0.55312103", "0.550769", "0.55075365", "0.5502988", "0.54702735", "0.54578793", "0.53877527", "0.5327595", "0.53023946", "0.5278144", "0.5220536", "0.5179092", "0.516386", "0.5151776", "0.5142039", "0.5104245", "0.5093749", "0.5082651", "0.5080736", "0.5079747", "0.5076021", "0.50640017", "0.50534916", "0.5028077", "0.49683344", "0.49396104", "0.49323702", "0.49073568", "0.48925027", "0.48868027", "0.48752075", "0.48743698", "0.48299697", "0.48269263", "0.48259503", "0.48248574", "0.48186967", "0.4814523", "0.48082536", "0.4807508", "0.48064914", "0.48010716", "0.47871637", "0.4773926", "0.47703004", "0.4767498", "0.4762384", "0.47522032", "0.47340804", "0.473358", "0.4731921", "0.47294882", "0.47292662", "0.47262236", "0.47129804", "0.4709645", "0.4709329", "0.47072542", "0.47006425", "0.46909952", "0.46909538", "0.46909323", "0.46902043", "0.46872935", "0.46872064", "0.46821588", "0.46792108", "0.4672321", "0.4671303", "0.46650848", "0.46623504", "0.4655874", "0.46535334", "0.46482342", "0.46456122", "0.4644927", "0.46434158", "0.46415222", "0.4639402", "0.46372193", "0.46367252", "0.4634995", "0.46316266", "0.4631323", "0.46279302", "0.46235132", "0.4620066", "0.4619567", "0.4618601", "0.4609899", "0.4608387", "0.460785" ]
0.6199243
1
Take keypoint motion data from another node and process it
def new_motion_callback(self, new_motion_msg): # we can't do anything until we have the camera calibration if self.camera_intrinsics is None: # TOmaybeDO: use a wait_for_message instead of missing a frame? return previous_kp = np.stack( (new_motion_msg.prev_x, new_motion_msg.prev_y), axis=1 ) current_kp = np.stack( (new_motion_msg.cur_x, new_motion_msg.cur_y), axis=1 ) f_mat = self.calculate_fundamental_matrix(previous_kp, current_kp) camera_matrix, R_mat, t_mat = self.manually_calculate_pose(f_mat) error_amount, triangulated = self.triangulation( previous_kp, current_kp, self.base_transformation_mat, camera_matrix ) # print np.linalg.norm(np.array(error_amount)) for p in triangulated: print p self.pub_point_cloud.publish( header=Header( stamp=rospy.Time.now(), # TODO: use camera image time frame_id='map' ), points=[Point32(p[0], p[1], p[2]) for p in triangulated] ) # get quaternion from rotation matrix tf_rot = np.identity(4) tf_rot[0:3, 0:3] = R_mat quat = tf.transformations.quaternion_from_matrix(tf_rot) old_quat = self.accumulated_pose.orientation new_quat = tf.transformations.quaternion_multiply( [old_quat.x, old_quat.y, old_quat.z, old_quat.w], quat ) normalized_new_quat = tf.transformations.quaternion_from_euler( *tf.transformations.euler_from_quaternion(new_quat) ) print normalized_new_quat self.accumulated_pose.orientation = Quaternion( *normalized_new_quat ) self.pub_pose.publish( header=Header( stamp=rospy.Time.now(), # TODO: use camera image time frame_id='map' ), pose=Pose( Point( 0, 0, 0 ), self.accumulated_pose.orientation ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def PTS(self):", "def dnd_motion(self, source, event):", "def motion_model(particle_poses, speed_command, odom_pose, odom_pose_prev, dt):\n \n M = particle_poses.shape[0]\n \n # TODO. For each particle calculate its predicted pose plus some\n # additive error to represent the process noise. With this demo\n # code, the particles move in the -y direction with some Gaussian\n # additive noise in the x direction. Hint, to start with do not\n # add much noise.\n\n #time is in ns 1e-9\n dt = dt * 1e-9\n \n if dt ==0:\n return particle_poses\n\n for m in range(M):\n\n theta = particle_poses[m, 2]\n\n v = speed_command[0]\n omega = speed_command[1]\n \n if motion_model_velocity: #Velocity\n\n if omega == 0: #straight\n vel_dx = v * cos(theta) * dt\n vel_dy = v * sin(theta) * dt\n vel_dtheta = 0\n\n else:\n vel_dx = -v / omega * sin(theta) + v / omega * sin(theta + omega * dt)\n vel_dy = v / omega * cos(theta) - v / omega * cos(theta + omega * dt)\n vel_dtheta = omega * dt\n \n\n\n if motion_model_odom:\n odom_mov = rev_odm(odom_pose, odom_pose_prev)\n\n #particle_poses[m] = fwd_odm(particle_poses[m], odom_mov)\n\n #odom_dpose = fwd_odm2(particle_poses[m], odom_mov)\n (odom_dx, odom_dy, odom_dtheta) = fwd_odm2(particle_poses[m], odom_mov)\n\n\n\n\n #fusion\n w = motion_weighting\n dx = w * odom_dx + (1-w) * vel_dx\n dy = w * odom_dy + (1-w) * vel_dy\n dtheta = w * odom_dtheta + (1-w) * vel_dtheta\n \n \n\n \n \n #process noise\n if motion_model_noise:\n noise_x= np.random.normal(0, motion_sigma_x)\n noise_y= np.random.normal(0, motion_sigma_y)\n noise_theta= np.random.normal(0, motion_sigma_theta)\n \n #local noise\n if motion_model_noise_alt:\n localnoise_x = np.random.normal(0, motion_sigma_x)\n localnoise_y = np.random.normal(0, motion_sigma_y)\n\n noise_x = localnoise_x * cos(theta) - localnoise_y * sin(theta)\n noise_y = localnoise_y * sin(theta) + localnoise_y * cos(theta)\n noise_theta = np.random.normal(0, motion_sigma_theta)\n\n\n\n particle_poses[m, 0] += dx + noise_x\n particle_poses[m, 1] += dy + noise_y\n particle_poses[m, 2] = wraptopi(theta + dtheta + noise_theta)\n\n \n return particle_poses", "def _update_motion_data(self, msg):\n if self._auv_motion != msg.motion:\n self._target_euler[\"alpha\"] = self._actual_euler[\"alpha\"]\n self._target_euler[\"beta\"] = self._actual_euler[\"beta\"]\n self._target_euler[\"gamma\"] = self._actual_euler[\"gamma\"]\n self._auv_motion = msg.motion\n self._thrusters_actual_speed[\"1\"] = msg.thrusters_speed.thruster_id1_speed\n self._thrusters_actual_speed[\"2\"] = msg.thrusters_speed.thruster_id2_speed\n self._thrusters_actual_speed[\"3\"] = msg.thrusters_speed.thruster_id3_speed\n self._thrusters_actual_speed[\"4\"] = msg.thrusters_speed.thruster_id4_speed\n self._thrusters_actual_speed[\"5\"] = msg.thrusters_speed.thruster_id5_speed\n self._thrusters_actual_speed[\"6\"] = msg.thrusters_speed.thruster_id6_speed\n self._thrusters_actual_speed[\"7\"] = msg.thrusters_speed.thruster_id7_speed\n self._thrusters_actual_speed[\"8\"] = msg.thrusters_speed.thruster_id8_speed", "def __init__(self):\n rospy.init_node('robot_controller')\n self.pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)\n self.sleepy = rospy.Rate(2)\n rospy.Subscriber(\"/odom\",Odometry,self.process_odom)\n rospy.Subscriber(\"/person_point\",Point,self.process_person)\n rospy.Subscriber(\"/clear_path_point\",Point,self.process_clear_path)\n\n rospy.on_shutdown(self.stop)\n thread.start_new_thread(self.getKey,())\n\n # make dictionary that calls functions for teleop\n 
self.state = {'i':self.forward, ',':self.backward,\n 'l':self.rightTurn, 'j':self.leftTurn,\n 'k':self.stop,'n':self.personfollowing,\n 'b':self.clearPathFollowing,'v':self.combinedcontrol}\n self.acceptablekeys = ['i','l','k',',','j','n','b','v']\n self.linearVector = Vector3(x=0.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)\n self.sendMessage()\n # get key interupt things\n self.settings = termios.tcgetattr(sys.stdin)\n self.key = None\n #current location and orientation\n self.currentx = 0.0\n self.currenty = 0.0\n self.orientation = 0.0\n #proportional controller constants\n self.kturn = .85\n self.kspeed= .1\n #location of person to be followed\n self.personx = 0.0\n self.persony = 0.0\n #location of target for obstacle avoidance\n self.clearx = 0.0\n self.cleary = 0.0", "def handle_data(self, point: Tuple[float, float], frame) -> None:", "def calculate_frame(self):\n frame = self.stream.read()\n self.keypoints, self.image = self.openpose.forward(frame, True)", "def process_point(meta, data):\n point = dfparser.Point()\n point.ParseFromString(data)\n\n sample_freq = meta['params']['sample_freq']\n threshold = meta['process_params']['threshold']\n\n events_all = []\n for channel in point.channels:\n for i, block in enumerate(channel.blocks):\n SOCKETIO.emit('progress',\n {'val': int((i/len(channel.blocks))*100)})\n eventlet.sleep(0)\n events = []\n for event in block.events:\n data = np.frombuffer(event.data, np.int16)\n events.append(extract_amps_approx2(data, event.time,\n threshold,\n sample_freq)[0])\n events = np.hstack(events)[0::2]\n events_all.append(events)\n\n events_all = np.hstack(events_all)\n return events_all", "def motion_t(input_line, cur, count):\n return start_catching_keys(1, \"cb_motion_t\", input_line, cur, count)", "def scan_motion():\n data1 = get_stream_array()\n while True:\n data2 = get_stream_array()\n diff_count = 0\n for y in range(0, streamHeight):\n for x in range(0, streamWidth):\n # get pixel differences. 
Conversion to int\n # is required to avoid unsigned short overflow.\n diff = abs(int(data1[y][x][1]) - int(data2[y][x][1]))\n if diff > threshold:\n diff_count += 1\n if diff_count > sensitivity:\n # x,y is a very rough motion position\n return x, y\n data1 = data2", "def main():\n np.random.seed(219)\n rospy.init_node(\"sawyer_dagger_teacher\")\n pub_start = rospy.Publisher('/teacher/start', JointCommand, queue_size=1)\n pub_epi_fin = rospy.Publisher('/teacher/fin', JointCommand, queue_size=1)\n vel_ik_pos_pub = rospy.Publisher('/teacher/ik_vel/', Pose, queue_size = 3)\n pub3 = rospy.Publisher('/ddpg/vel_start/', Float64, queue_size=1)\n pub4 = rospy.Publisher('/ddpg/vel_end/', Float64, queue_size=1)\n goal_obs_pub = rospy.Publisher('/teacher/goal_obs/', Pose, queue_size=1)\n pos_cmd_pub = rospy.Publisher('/teacher/pos_cmd_pub/', PosCmd, queue_size=1)\n\n\n\n rospy.set_param('dagger_reset',\"false\") # param_name, param_value\n\n\n # Load Gazebo Models via Spawning Services\n # Note that the models reference is the /world frame\n # and the IK operates with respect to the /base frame\n # load_gazebo_models()\n # Remove models from the scene on shutdown\n rospy.on_shutdown(delete_gazebo_models)\n\n limb = 'right'\n hover_distance = 0.15 # meters\n # Starting Joint angles for right arm\n starting_joint_angles = {'right_j0': -0.041662954890248294,\n 'right_j1': -1.0258291091425074,\n 'right_j2': 0.0293680414401436,\n 'right_j3': 1.37518162913313,\n 'right_j4': -0.06703022873354225,\n 'right_j5': 0.7968371433926965,\n 'right_j6': 1.7659649178699421}\n\n pnp = PickAndPlace(limb, hover_distance)\n\n pnp.move_to_start(starting_joint_angles)\n\n \n # m_planner = trajectorySender()\n # An orientation for gripper fingers to be overhead and parallel to the obj\n overhead_orientation = Quaternion(\n x=-0.00142460053167,\n y=0.999994209902,\n z=-0.00177030764765,\n w=0.00253311793936)\n block_poses = list()\n # The Pose of the block in its initial location.\n # You may wish to replace these poses with estimates\n # from a perception node.\n block_poses.append(Pose(\n position=Point(x=0.45, y=0.155, z=-0.129),\n orientation=overhead_orientation))\n # Feel free to add additional desired poses for the object.\n # Each additional pose will get its own pick and place.\n block_poses.append(Pose(\n position=Point(x=0.6, y=-0.1, z=-0.129),\n orientation=overhead_orientation))\n # Move to the desired starting angles\n print(\"Running. 
Ctrl-c to quit\")\n # pnp.move_to_start(starting_joint_angles)\n idx = 0\n rate = rospy.Rate(1)\n block_quat_pose = [0.00142460053167,\n 0.999994209902,\n 0.00177030764765,\n 0.00253311793936]\n if rospy.has_param('vel_calc'):\n rospy.delete_param('vel_calc')\n load_gazebo_models()\n\n while not rospy.is_shutdown():\n\n\n starting_joint_angles['right_j0'] = np.random.uniform(-0.05, 0.05)\n starting_joint_angles['right_j1'] = np.random.uniform(-0.95, -0.85)\n starting_joint_angles['right_j2'] = np.random.uniform(-0.1, 0.1)\n starting_joint_angles['right_j3'] = np.random.uniform(1.6, 1.7)\n\n # starting_joint_angles['right_j0'] = np.random.uniform(-0.75, 0.75)\n # starting_joint_angles['right_j1'] = np.random.uniform(-0.97, -0.80)\n # starting_joint_angles['right_j2'] = np.random.uniform(-0.15, 0.15)\n # starting_joint_angles['right_j3'] = np.random.uniform(1.55, 1.75)\n\n start_pose = [starting_joint_angles['right_j0'], starting_joint_angles['right_j1'],\n starting_joint_angles['right_j2'], starting_joint_angles['right_j3'],\n starting_joint_angles['right_j4'], starting_joint_angles['right_j5'],\n starting_joint_angles['right_j6']]\n \n while not rospy.is_shutdown(): # wait until trajectory is collected for each episode\n if rospy.has_param('dagger_reset'):\n rospy.delete_param('dagger_reset')\n break\n pnp.move_to_start(starting_joint_angles)\n\n\n delete_kinect_camera()\n # delete_gazebo_models()\n delete_gazebo_block()\n rand_x = np.random.uniform(0.45, .75)\n rand_y = np.random.uniform(-0.2, 0.33)\n # rand_x = np.random.uniform(0.44,0.68)\n\n # rand_y = np.random.uniform(-0.20, 0.35)\n pose_block = Pose(position=Point(x=rand_x, y=rand_y, z=1.00)\n , orientation=overhead_orientation)\n pose_rob = Pose(position=Point(x=rand_x-0.015, y=rand_y+0.03, z=0.03), orientation=overhead_orientation) \n\n # rospy.set_param('vel_calc', 'true')\n # pnp.move_to_start(starting_joint_angles)\n # oktogo = pnp.move_to_start_vel_command(start_pose)\n # if rospy.has_param('vel_calc'):\n # rospy.delete_param('vel_calc')\n # loads env\n load_gazebo_block(block_pose=pose_block)\n # load_kinect_camera()\n\n \n\n # rospy.set_param('vel_calc', 'true')\n print 'Reaching target object... Learning...'\n rospy.set_param('epi_start', 'true')\n pnp.reach(pose_rob, pos_cmd_pub)\n # reached = pnp.reach_vel_ctrl(pose_rob)\n rospy.sleep(0.5)\n # if rospy.has_param('vel_calc'):\n # rospy.delete_param('vel_calc')\n # if reached:\n # rospy.set_param('reached', 'true')\n # goal_obs_pub.publish(pose_rob)\n\n\n print 'Reached target object! 
and Goal obs acquired Resetting...'\n # while not rospy.is_shutdown(): # wait until trajectory is collected for each episode\n # if rospy.has_param('demo_success'):\n # break\n while not rospy.is_shutdown(): # wait until trajectory is collected for each episode\n if rospy.has_param('demo_success'):\n rospy.delete_param('demo_success')\n break\n \n # rospy.delete_param('demo_success')\n \n\n return 0", "def motion_extraction():\n # iterate through frames\n global frame_height, frame_width\n global limb_coords, init_coords\n frame_count = 0\n has_frames, frame = capture.read()\n\n while has_frames:\n img_out = frame.copy()\n img_out = insert_padding(img_out, 14*14, 12*14)\n\n if frame_count == 0:\n # change global values of height and width\n frame_height = frame_height + 14*14*2\n frame_width = frame_width + 12*14*2\n get_start_positions(img_out)\n img_out2 = segment_red(img_out, 200, 130)\n #erode(img_out2, 4, 6)\n remove_artifacts(img_out2)\n #enhance_contrast(img_out2)\n\n if frame_count > 0:\n get_motion(prev_frame, img_out2, frame_count)\n\n prev_frame = img_out2.copy()\n frame_count += 1\n has_frames, frame = capture.read()", "def handle_pose(msg):\n global sensor_cfg\n global no_position\n global body_frame\n global frame_cfg\n\n quat = np.array([msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.orientation.w])\n pos = np.array([msg.pose.position.x*1000, msg.pose.position.y*1000, msg.pose.position.z*1000])\n\n if position_mode == \"zero_pos\":\n pos = np.array([0, 0, 0])\n elif position_mode == \"relative\":\n pos = pos - parent_position\n\n br = tf.TransformBroadcaster()\n\n br.sendTransform(pos,\n quat,\n msg.header.stamp,\n body_frame,\n msg.header.frame_id)\n\n for k in frame_cfg:\n br.sendTransform(np.array([float(x) for x in frame_cfg[k][\"position\"].split(\" \")]),\n np.array([float(x) for x in frame_cfg[k][\"pose\"].split(\" \")]),\n rospy.Time.now(),\n k,\n body_frame)\n\n for k in sensor_cfg:\n br.sendTransform(np.array([float(x) for x in sensor_cfg[k][\"position\"].split(\" \")]),\n np.array([float(x) for x in sensor_cfg[k][\"pose\"].split(\" \")]),\n rospy.Time.now(),\n k,\n body_frame)\n\n for k in thruster_cfg:\n br.sendTransform(np.array([float(x) for x in sensor_cfg[k][\"position\"].split(\" \")]),\n np.array([float(x) for x in sensor_cfg[k][\"pose\"].split(\" \")]),\n rospy.Time.now(),\n k,\n body_frame)", "def _process_msg(self, data):\n\n # print(data)\n\n tilt = data.linear.x\n pan = data.angular.z\n\n #self.controller.pantilt_magnitude(pan, tilt)\n self.controller.pantilt_increase_angle(pan * self.scale_pan, tilt * self.scale_tilt)", "def _target_callback(self, msg):\n self.target_pose = np.asarray(msg.pos)[np.newaxis].T\n self.target_vel = np.asarray(msg.vel)[np.newaxis].T\n self.target_acc = np.asarray(msg.acc)[np.newaxis].T\n\n print(\"\\nGoing to:\")\n print(\"Pos: \\n\" + str(self.target_pose))\n print(\"Vel: \\n\" + str(self.target_vel))\n print(\"Acc: \\n\" + str(self.target_acc))", "def follow_ar_tag(zumy, ar_tags):\n print(zumy)\n print(ar_tags)\n # YOUR CODE HERE\n\n #copy paste from lab2, change the proper nouns. 
Pumbing to make a publisher\n\n # Create a timer object that will sleep long enough to result in\n # a 10Hz publishing rate\n r = rospy.Rate(10) # 10hz\n\n\n #plumbing to get transforms:\n listener = tf.TransformListener()\n\n # Loop until the node is killed with Ctrl-C\n while not rospy.is_shutdown():\n print(\"loop \" + str(random.random()))\n #ok, need to compute the transform between tag[0] (stationary) and tag[1] (zumy)\n try_success = False\n try:\n now = rospy.Time.now()\n \n listener.waitForTransform(ar_tags['ar1'],ar_tags['arZ'],now,rospy.Duration(1))\n (trans, rot) = listener.lookupTransform(ar_tags['ar1'], ar_tags['arZ'],now)# rospy.Time(0))\n \n rbt = ats.return_rbt(np.array(trans),np.array(rot))\n\n\n #print('gab between ' + ar_tags['ar1'] + ' and ' + ar_tags['arZ'])\n #print rbt\n \n twist = ats.compute_twist(rbt=rbt)\n #print('twist between ' + ar_tags['ar1'] + ' and ' + ar_tags['arZ'])\n #print twist\n\n #now, need to create a geomeryy Msg Twist.\n\n\n\n theta = math.degrees(math.atan2(rbt[1][3],rbt[0][3]))\n print(theta)\n #extract theta out of RBT, because we want zumy to move directly towards the target, not move to park ontop of (and with the orientation of) the target\n \n last_seen_zumy = time.time()\n try_success = True \n except:\n traceback.print_exc()\n #print(\"in Except\")\n print(\"AR TAGS EXISTANCE IS AS FOLLOWS: \")\n print(listener.frameExists(ar_tags['ar1']))\n print(listener.frameExists(ar_tags['arZ']))\n print ''\n\n global last_seen_zumy\n out = Twist()\n\n if (time.time() - last_seen_zumy > 2):\n #I haven't seen zumy for 2 seconds. Issue E-stop\n #do publish here so i can be more selective on when to NOT publish stops.\n pub.publish(out)\n print(\"Stop!\")\n\n elif try_success:\n\n out = Twist()\n \n #if abs(theta) < 5:\n #go forward!\n #print(\"FORWARD!\")\n\n #ok, two parts: going forward, and turning.\n\n print(trans)\n\n dist = trans[0]\n print(\"dist is \" + str(dist))\n print(\"Theta is \" + str(theta))\n \n if abs(dist)>1:\n linear_gain = .13\n elif abs(dist) > .5:\n linear_gain = .12\n else:\n linear_gain = .10\n\n if dist > .12:\n out.linear.x = linear_gain# * twist[0][0]\n else:\n \n #mission accomplished... i'm really close\n pass\n\n #else:\n #just rotate towards objective.\n #let's come up with a new RBT that achieve those goals... just rotation, no translation.\n omega = np.array([0,0,1]) #rotate about z axis?\n theta_rad = math.radians(theta)\n trans = np.array([0,0,0]) #no translation!\n rbt = eqf.create_rbt(omega,theta_rad,trans)\n twist = ats.compute_twist(rbt=rbt)\n\n #ok, so now I have a rotational axis. I now need to scale it by something... 
ok, what about linear function?\n\n if abs(theta) > 20:\n rotational_gain = .18\n else:\n rotational_gain = .18*abs(theta/20)\n\n out.angular.z = rotational_gain*twist[1][2]\n \n\n print(out)\n\n pub.publish(out)\n #S IS STUFF TO PUBLISH\n #pub.publish(s,rospy.get_time())\n\n # Use our rate object to sleep until it is time to publish again\n r.sleep()\n \n exit_handler()", "def receive_data(self):\n while True:\n\n data = self.recv_end()\n\n if len(data) > 0:\n\n try:\n self.current_data = json.loads(data)\n self.current_3D_points = base64.b64decode(self.current_data['points'])\n self.is_new_data = True\n\n except:\n # print('cannot load DepthCamera data')\n pass", "def step(self, action):\n \"\"\" Action is a motion command \"\"\"\n rich_obs, reward, done, info = super(ColoredEgoCostmapRandomAisleTurnEnv, self).step(action)\n obs = self._extract_egocentric_observation(rich_obs)\n return obs, reward, done, info", "def on_message(self, simulation_id,message):\n\n try:\n message_str = 'received message ' + str(message)\n\n json_msg = yaml.safe_load(str(message))\n\n if type(json_msg) != dict:\n raise ValueError(\n ' is not a json formatted string.'\n + '\\njson_msg = {0}'.format(json_msg))\n\n # fncs_input_message = {\"{}\".format(simulation_id): {}}\n measurement_values = json_msg[\"message\"][\"measurements\"]\n\n # storing the magnitude and measurement_mRID values to publish in the dnp3 points for measurement key values\n for y in measurement_values:\n # print(self.processor_point_def.points_by_mrid())\n m = measurement_values[y]\n if \"magnitude\" in m.keys():\n for point in self.outstation.get_agent().point_definitions.all_points():\n #print(\"point\",point)\n #print(\"y\",y)\n if m.get(\"measurement_mrid\") == point.measurement_id and point.magnitude != m.get(\"magnitude\"):\n point.magnitude = m.get(\"magnitude\")\n self.outstation.apply_update(opendnp3.Analog(point.magnitude), point.index)\n\n elif point.measurement_type == \"VA\" and \"VAR\" in point.name:\n angle = math.radians(m.get(\"angle\"))\n point.magnitude = math.sin(angle) * m.get(\"magnitude\")\n self.outstation.apply_update(opendnp3.Analog(point.magnitude), point.index)\n \n elif point.measurement_type == \"VA\" and \"Watts\" in point.name:\n angle1 = math.radians(m.get(\"angle\"))\n point.magnitude = math.cos(angle1) * m.get(\"magnitude\")\n self.outstation.apply_update(opendnp3.Analog(point.magnitude), point.index)\n \n elif point.measurement_type == \"VA\" and \"angle\" in point.name:\n angle2 = math.radians(m.get(\"angle\"))\n #point.magnitude = math.cos(angle1) * m.get(\"magnitude\")\n self.outstation.apply_update(opendnp3.Analog(angle2), point.index)\n \n \n elif \"value\" in m.keys():\n for point in self.outstation.get_agent().point_definitions.all_points():\n if m.get(\"measurement_mrid\") == point.measurement_id and point.value != m.get(\"value\"):\n point.value = m.get(\"value\")\n self.outstation.apply_update(opendnp3.Binary(point.value), point.index)\n except Exception as e:\n message_str = \"An error occurred while trying to translate the message received\" + str(e)", "async def on_dpad(event, data):\n x_axis = data[0]\n y_axis = data[1]\n ArmDevice.storage.command[4] = y_axis*wrist_pitch_speed\n ArmDevice.storage.command[5] = x_axis*gripper_rotation_speed", "def run(self):\n while self.is_connected():\n self.__ticker.tick() # Tick (sleep)\n\n if self.process and self.process.is_alive():\n self.update()\n continue\n\n c = getkey() \n if c:\n if c == 'w':\n print \"Moving forward\"\n 
self.add_property(\"name\", \"pioneer_command\")\n self.add_property(\"pioneer_command\", \"mmove\")\n elif c == 'a':\n print \"Turning left\"\n self.add_property(\"name\", \"pioneer_command\")\n self.add_property(\"pioneer_command\", \"mleft\")\n elif c == 'd':\n print \"Turning right\"\n self.add_property(\"name\", \"pioneer_command\")\n self.add_property(\"pioneer_command\", \"mright\")\n elif c == 'f':\n self.add_property(\"name\", \"pioneer_command\")\n self.add_property(\"pioneer_command\", \"finish\")\n elif c == 'p':\n self.add_property(\"name\", \"remote_command\")\n self.add_property(\"pioneer_command\", \"record\")\n elif c == 'h':\n print \"[w] = forward [a] = left [d] = right [f] = finish\"\n\n \n ############################\n # Send data\n self.update()", "def run():\n rospy.init_node(\"time\")\n mpc = PointFollowerMPC(horizon_length=HORIZON_LENGTH, time_step=TIME_STEP)\n mpc.setup()\n FarthestPointFollower(mpc=mpc).start()", "def motion_T(input_line, cur, count):\n return start_catching_keys(1, \"cb_motion_T\", input_line, cur, count)", "def listener():\n rospy.Subscriber(\"motion_plan\", FloatList, callback)\n rospy.spin()", "def scan_received(self, msg):\n # print msg\n if not(self.initialized):\n # wait for initialization to complete\n return\n\n if not(self.tf_listener.canTransform(self.base_frame,msg.header.frame_id,msg.header.stamp)):\n # need to know how to transform the laser to the base frame\n # this will be given by either Gazebo or neato_node\n return\n\n if not(self.tf_listener.canTransform(self.base_frame,self.odom_frame,msg.header.stamp)):\n # need to know how to transform between base and odometric frames\n # this will eventually be published by either Gazebo or neato_node\n return\n\n # print 'msg.header.frame_id', msg.header.frame_id\n # calculate pose of laser relative ot the robot base\n p = PoseStamped(header=Header(stamp=rospy.Time(0),\n frame_id=msg.header.frame_id))\n self.laser_pose = self.tf_listener.transformPose(self.base_frame,p)\n\n # find out where the robot thinks it is based on its odometry\n # listener.getLatestCommonTime(\"/base_link\",object_pose_in.header.frame_id)\n # p = PoseStamped(header=Header(stamp=msg.header.stamp,\n p = PoseStamped(header=Header(stamp=self.tf_listener.getLatestCommonTime(self.base_frame, self.map_frame),\n # p = PoseStamped(header=Header(stamp=rospy.Time.now(),\n frame_id=self.base_frame),\n pose=Pose())\n # p_aux = PoseStamped(header=Header(stamp=self.tf_listener.getLatestCommonTime(\"/base_link\",\"/map\"),\n p_aux = PoseStamped(header=Header(stamp=self.tf_listener.getLatestCommonTime(self.odom_frame, self.map_frame),\n # p_aux = PoseStamped(header=Header(stamp=rospy.Time.now(),\n frame_id=self.odom_frame),\n pose=Pose())\n odom_aux = self.tf_listener.transformPose(self.map_frame, p_aux)\n odom_aux_xy_theta = convert_pose_to_xy_and_theta(odom_aux.pose)\n # print 'odom_aux_xy_theta', odom_aux_xy_theta\n\n self.odom_pose = self.tf_listener.transformPose(self.odom_frame, p)\n # print 'self.odom_pose', self.odom_pose\n # (trans, root) = self.tf_listener.lookupTransform(self.odom_frame, self.base_frame, rospy.Time(0))\n # self.odom_pose = trans\n # print trans, root\n new_odom_xy_theta = convert_pose_to_xy_and_theta(self.odom_pose.pose)\n # new_odom_xy_theta = convert_pose_to_xy_and_theta(self.laser_pose.pose)\n xy_theta_aux = (new_odom_xy_theta[0]+odom_aux_xy_theta[0], \n new_odom_xy_theta[1]+odom_aux_xy_theta[1], new_odom_xy_theta[2])\n self.xy_theta_aux = xy_theta_aux\n\n if not(self.particle_cloud):\n 
self.initialize_particle_cloud(xy_theta_aux)\n self.current_odom_xy_theta = new_odom_xy_theta\n\n elif (math.fabs(new_odom_xy_theta[0] - self.current_odom_xy_theta[0]) > self.linear_mov or\n math.fabs(new_odom_xy_theta[1] - self.current_odom_xy_theta[1]) > self.linear_mov or\n math.fabs(new_odom_xy_theta[2] - self.current_odom_xy_theta[2]) > self.angular_mov):\n\n self.update_particles_with_odom(msg)\n self.update_particles_with_laser(msg)\n self.resample_particles()\n\n self.publish_particles(msg)", "def pose_cb(self, msg): # This is being called at 50 Hz\n\n # Update time and count information about received poses\n now = time.time()\n delta_t = 0 if self.previous_pose_cb_time is None else now-self.previous_pose_cb_time\n if self.previous_pose_cb_time is None:\n self.previous_pose_cb_time = now\n if delta_t < self.min_update_int:\n return\n self.previous_pose_cb_time = now\n self.total_time += delta_t\n self.received_pose_count += 1\n\n rospy.logdebug('Processing pose #{}; delta_t from previous processing is {}s with average {}s'.format(self.received_pose_count,\n delta_t,\n self.total_time/self.received_pose_count))\n\n # Determine the index in `self.waypoints` of the first waypoint in front of the car\n pose_i = get_next_waypoint_idx(msg.pose, self.waypoints, self.prev_wp_idx)\n\n if (pose_i >= len(self.waypoints) or pose_i < 0) and not self.end_of_track_notified:\n rospy.loginfo('Reached the end of the waypoints track.')\n self.end_of_track_notified = True\n\n # Store the found value; search will start from it at the next iteration, for efficiency\n if pose_i < self.prev_wp_idx:\n rospy.logdebug(\"Going backward? Got pose_i < self.prev_wp_idx: pose_i={} self.prev_ws_idx={}\".format(pose_i, self.prev_wp_idx))\n self.prev_wp_idx = pose_i\n\n # Get the traffic light status (it is -1 if no yellow/red traffic light ahead)\n tl_wp_i = self.get_tl()\n rospy.logdebug(\"Getting tl_wp_i={}\".format(tl_wp_i))\n\n if tl_wp_i >=0:\n tl_wp_i = self.take_margin(tl_wp_i, 4)\n\n lane = Lane()\n lane.header.frame_id = '/world'\n lane.header.stamp = rospy.Time(0)\n\n ''' \n Collect LOOKAHEAD_WPS waypoints starting from the given index, if there is at least one waypoint ahead\n of the car, and the car is not at, or past, a red/yellow traffic light\n '''\n\n if pose_i >= 0 and (tl_wp_i < 0 or pose_i < tl_wp_i):\n for count in xrange(LOOKAHEAD_WPS):\n i = pose_i+count\n if i >= len(self.waypoints): # Car must stop at the end of the waypoints track\n break\n wp = copy.deepcopy(self.waypoints[i])\n # Cap the linear velocity at self.enforced_speed_limit\n wp.twist.twist.linear.x = min(wp.twist.twist.linear.x, self.enforced_speed_limit)\n lane.waypoints.append(wp)\n\n # Handle traffic lights\n if tl_wp_i >= 0: # If there is a red traffic light in front of the car...\n # If already at (or past) the stop waypoint, make sure the car stops and doesn't move\n if pose_i >= tl_wp_i:\n lane.waypoints = []\n # If the waypoint where to stop is within LOOKAHEAD_WPS from the current closest waypoint...\n elif pose_i+LOOKAHEAD_WPS > tl_wp_i:\n # ... 
then plan to stop\n lane.waypoints = plan_stop(lane.waypoints,\n tl_wp_i-pose_i,\n self.decel_limit/3.,\n self.decel_limit,\n self.enforced_speed_limit)\n\n '''if len(lane.waypoints) >= 2 and lane.waypoints[0].twist.twist.linear.x == 0 and lane.waypoints[1].twist.twist.linear.x > 0:\n cicco = 1\n pass'''\n self.final_waypoints_pub.publish(lane)\n total_time = time.time() - now\n rospy.logdebug('Time spent in pose_cb: {}s'.format(total_time))", "def target_position(self, time):\n # get joint positions and use fk to get end effector position?\n # ar_tag from topic\n\n cur_pos = self.target_velocity(time)*time + self.start_pos\n\n self.points_generated.append(cur_pos)\n #print(self.start_pos)\n # print(cur_pos)\n return cur_pos", "def start(self):\n\n rospy.loginfo(self.name + \": Node started\")\n rospy.set_param(\"path_logger_active\", False)\n\n rospy.sleep(1)\n\n self.read_waypoints_pickle()\n rospy.loginfo(self.name + \": Global waypoints read from file\")\n\n while True:\n if self.uav_pose is None:\n rospy.loginfo(self.name + \": Waiting for UAV Pose\")\n self._rate_reached_waypoint.sleep()\n else:\n uav_pose_start = copy.copy(self.uav_pose) # copy is needed here, because uav_pose is mutable!\n rospy.loginfo(self.name + \": UAV Pose received\")\n break\n\n # Set mode to Offboard, Arm the UAV and takeoff to set altitude\n self._takeoff_procedure(uav_pose_start)\n rospy.sleep(1) # To prevent that takeoff goes directly into path following\n rospy.loginfo(self.name + ': Takeoff procedure finished')\n\n # Start publishing global waypoints\n uav_pose_after_takeoff = copy.copy(self.uav_pose)\n wp_global_previous_temp = Waypoint()\n wp_global_previous_temp.x_lat = uav_pose_after_takeoff.pose.position.x\n wp_global_previous_temp.y_long = uav_pose_after_takeoff.pose.position.y\n wp_global_previous_temp.z_alt = uav_pose_after_takeoff.pose.position.z\n wp_global_previous_temp = copy.copy(wp_global_previous_temp)\n self.waypoint_global_next = self.waypoint_global_all.waypoints[0]\n self.waypoint_global_previous = wp_global_previous_temp\n self._thread_waypoint_global.start()\n\n # Activate path logging node. Maybe not best coding practice to do this with a parameter and not a publish/\n # subscriber or service but the path logger was only needed to record test results\n rospy.set_param(\"path_logger_active\", True)\n\n # Starts forwarding the setpoints from the local planner\n self._thread_forward_local_setpoints.start()\n\n # Stops sending the takeoff waypoint. 
Between this and\n # sending the next waypoint from the local planner can be a maximum of .5 seconds, since waypoints have\n # to be published with >2Hz (PX4/MAVROS restriction)\n self._thread_takeoff_setpoint.do_run = False\n\n # Iterates over all global waypoints\n for wp_global_current in self.waypoint_global_all.waypoints:\n self.waypoint_global_next = wp_global_current\n self.waypoint_global_previous = wp_global_previous_temp\n rospy.loginfo(self.name + ': Published new global waypoint')\n\n while not self._is_at_position(self.uav_pose, wp_global_current, atol=self.tol_wp_reached) \\\n and not rospy.is_shutdown():\n self._rate_reached_waypoint.sleep()\n\n rospy.loginfo(self.name + ': Reached previous global waypoint')\n wp_global_previous_temp = copy.copy(wp_global_current)\n\n self.finished = True\n rospy.set_param(\"path_logger_active\", False)\n self._thread_forward_local_setpoints.do_run = False # Stops forwarding the setpoints from the local planner\n rospy.loginfo(self.name + ': Reached final global waypoint')\n rospy.sleep(10)\n return", "def movement(self):", "def advance(distance, angle, da):\n cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=5)\n # How fast will we update the robot's movement?\n rate = 10\n # Set the equivalent ROS rate variable\n r = rospy.Rate(rate)\n # Set the forward linear speed to 0.2 meters per second\n if distance >= 0.0:\n linear_speed = 0.5\n else:\n linear_speed = -0.5\n # Set the travel distance in meters\n goal_distance = abs(distance)\n # Set the rotation speed in radians per second\n if angle < 0.0:\n angular_speed = -0.5\n else:\n angular_speed = 0.5\n # Set the angular tolerance in degrees converted to radians\n angular_tolerance = radians(0.5)\n # Set the rotation angle to angle in radians \n goal_angle = angle\n # Initialize the tf listener\n tf_listener = tf.TransformListener()\n # Give tf some time to fill its buffer\n rospy.sleep(2)\n # Set the map frame\n map_frame = '/map'\n # Set the odom frame\n odom_frame = '/odom'\n \"\"\" Find out if the robot uses /map->/odom transform \"\"\"\n try:\n tf_listener.waitForTransform(map_frame, odom_frame, rospy.Time(), rospy.Duration(1.0))\n except (tf.Exception, tf.ConnectivityException, tf.LookupException):\n rospy.loginfo(\"Cannot find transform between /map and /odom\")\n rospy.signal_shutdown(\"tf Exception\") \n # Find out if the robot uses /base_link or /base_footprint\n try:\n tf_listener.waitForTransform(odom_frame, '/base_footprint', rospy.Time(), rospy.Duration(1.0))\n base_frame = '/base_footprint'\n except (tf.Exception, tf.ConnectivityException, tf.LookupException):\n try:\n tf_listener.waitForTransform(odom_frame, '/base_link', rospy.Time(), rospy.Duration(1.0))\n base_frame = '/base_link'\n except (tf.Exception, tf.ConnectivityException, tf.LookupException):\n rospy.loginfo(\"Cannot find transform between /odom and /base_link or /base_footprint\")\n rospy.signal_shutdown(\"tf Exception\") \n # Initialize the position variable as a Point type\n position = Point() \n # Initialize the movement command\n move_cmd = Twist()\n \n\n # Get the starting position values \n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n \n x_start = position.x\n y_start = position.y\n \n # Keep track of the distance traveled\n dist = 0.0\n #pdb.set_trace()\n if da:\n print bcolors.OKGREEN + \"da True\" + bcolors.ENDC\n print bcolors.OKGREEN + \"Empieza distancia\" + bcolors.ENDC\n # Set the movement command to forward motion\n move_cmd.linear.x = linear_speed\n bump_count = 
0\n # Enter the loop to move along\n while dist < goal_distance and not rospy.is_shutdown():\n #pdb.set_trace()\n last_dist = dist\n # Publish the Twist message and sleep 1 cycle \n cmd_vel_pub.publish(move_cmd)\n r.sleep()\n # Get the current position\n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n # Compute the Euclidean distance from the start\n dist = sqrt(pow((position.x - x_start), 2) + pow((position.y - y_start), 2))\n \n if dist == last_dist and dist != 0.0:\n bump_count += 1\n print \"dist, goal_distance\", dist, goal_distance\n print \"BUMP\"+str(bump_count)\n if bump_count > 10:\n # Move forward for a time to go the desired distance\n linear_duration = 1.5/abs(linear_speed) \n ticks = int(linear_duration * rate)\n move_cmd.linear.x *= -1\n for t in range(ticks):\n cmd_vel_pub.publish(move_cmd)\n r.sleep()\n continue\n # Stop the robot before the rotation\n move_cmd = Twist()\n cmd_vel_pub.publish(move_cmd)\n rospy.sleep(1)\n \n # Set the movement command to a rotation\n move_cmd.angular.z = angular_speed\n # Track the last angle measured\n last_angle = quat_to_angle(rotation)\n print bcolors.OKGREEN + \"Empieza angle\" + bcolors.ENDC\n # Track how far we have turned\n turn_angle = 0\n done = False\n while abs(turn_angle + angular_tolerance) < abs(goal_angle) and not rospy.is_shutdown():\n # Publish the Twist message and sleep 1 cycle \n cmd_vel_pub.publish(move_cmd)\n r.sleep()\n # Get the current rotation\n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n # Compute the amount of rotation since the last loop\n delta_angle = normalize_angle(quat_to_angle(rotation) - last_angle)\n # Add to the running total\n turn_angle += delta_angle\n last_angle = quat_to_angle(rotation)\n\n if (abs(turn_angle + angular_tolerance) > abs(goal_angle*4/5) or abs(goal_angle) < radians(2)) and not done:\n #pdb.set_trace()\n # Stop the robot before the next leg\n move_cmd = Twist()\n cmd_vel_pub.publish(move_cmd)\n rospy.sleep(1)\n if angle < 0.0:\n angular_speed = -0.05\n else:\n angular_speed = 0.05\n # Set the movement command to a rotation\n move_cmd.angular.z = angular_speed\n done = True\n \n # Stop the robot before the next leg\n move_cmd = Twist()\n cmd_vel_pub.publish(move_cmd)\n rospy.sleep(1)\n else:\n print bcolors.OKGREEN + \"da False\" + bcolors.ENDC\n #pdb.set_trace()\n # Set the movement command to a rotation\n move_cmd.angular.z = angular_speed\n # Track the last angle measured\n last_angle = quat_to_angle(rotation)\n print bcolors.OKGREEN + \"Empieza angle\" + bcolors.ENDC\n # Track how far we have turned\n turn_angle = 0\n done = False\n while abs(turn_angle + angular_tolerance) < abs(goal_angle) and not rospy.is_shutdown():\n # Publish the Twist message and sleep 1 cycle \n cmd_vel_pub.publish(move_cmd)\n r.sleep()\n # Get the current rotation\n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n # Compute the amount of rotation since the last loop\n delta_angle = normalize_angle(quat_to_angle(rotation) - last_angle)\n # Add to the running total\n turn_angle += delta_angle\n last_angle = quat_to_angle(rotation)\n# print \"x\", position.x\n# print \"y\", position.y\n# print \"la\", last_angle\n# print \"ta\", degrees(turn_angle)\n# print \"\\n\"\n #raw_input(\"Press ENTER to continue ...\")\n if (abs(turn_angle + angular_tolerance) > abs(goal_angle*4/5) or abs(goal_angle) < radians(2)) and not done:\n #pdb.set_trace()\n # Stop the robot before the next leg\n move_cmd = Twist()\n cmd_vel_pub.publish(move_cmd)\n 
rospy.sleep(1)\n if angle < 0.0:\n angular_speed = -0.05\n else:\n angular_speed = 0.05\n # Set the movement command to a rotation\n move_cmd.angular.z = angular_speed\n done = True\n \n # Stop the robot before the next movement\n move_cmd = Twist()\n cmd_vel_pub.publish(move_cmd)\n rospy.sleep(1)\n print bcolors.OKGREEN + \"Empieza distancia\" + bcolors.ENDC \n #pdb.set_trace()\n # Get the starting position values \n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n \n x_start = position.x\n y_start = position.y\n \n move_cmd.linear.x = linear_speed\n # Keep track of the distance traveled\n dist = 0.0\n bump_count = 0\n # Enter the loop to move along\n while dist < goal_distance and not rospy.is_shutdown():\n last_dist = dist\n # Publish the Twist message and sleep 1 cycle \n cmd_vel_pub.publish(move_cmd)\n r.sleep()\n # Get the current position\n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n # Compute the Euclidean distance from the start\n dist = sqrt(pow((position.x - x_start), 2) + pow((position.y - y_start), 2))\n \n if dist == last_dist and dist != 0.0:\n bump_count += 1\n print \"dist, goal_distance\", dist, goal_distance\n print \"BUMP\"+str(bump_count)\n if bump_count > 10:\n # Move forward for a time to go the desired distance\n linear_duration = 1.5/abs(linear_speed) \n ticks = int(linear_duration * rate)\n move_cmd.linear.x *= -1\n for t in range(ticks):\n cmd_vel_pub.publish(move_cmd)\n r.sleep()\n continue\n # Stop the robot before the rotation\n move_cmd = Twist()\n cmd_vel_pub.publish(move_cmd)\n rospy.sleep(1)\n\n # Stop the robot for good\n cmd_vel_pub.publish(Twist())\n rospy.sleep(1)\n\n # Get the current rotation\n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n \n return (position, rotation)", "def get_motion(frame1k, frame2k, frame_count):\n frame1 = frame1k.copy()\n frame2 = frame2k.copy()\n\n global limb_coords, init_coords, num_blocks\n cv2.imwrite(\"thisImageAnalyse.png\", frame2)\n block_size = 3\n block_rad = int(block_size/2)\n\n def get_SSD():\n \"\"\" applies SSD formula to search area\n :return SSD value\"\"\"\n dist = 0\n # traversal of pixels in potential Bi+1 block\n # compare corresponding pixel positions with source block in f1 and neighbour block in f2\n y1 = center_y1 - block_rad # start pos.\n for y2 in range(center_y2 - block_rad, (center_y2 - block_rad + block_size)):\n x1 = center_x1 - block_rad # start pos\n for x2 in range(center_x2 - block_rad, (center_x2 - block_rad + block_size)):\n try:\n # displacement formula for RGB channels of each pixel in block\n dist = dist + (frame1[y1][x1][0] - frame2[y2][x2][0])**2 + (frame1[y1][x1][1] - frame2[y2][x2][1])**2 + (frame1[y1][x1][2] - frame2[y2][x2][2])**2\n except RuntimeWarning:\n pass\n x1 += 1\n y1 += 1\n return math.sqrt(dist)\n\n # for each body part\n b = 0\n while b < 5:\n avg_x = 0.0\n avg_y = 0.0\n new_x = 0.0\n new_y = 0.0\n a = 0\n # for each block on body part (9 total)\n while a < num_blocks:\n found = False\n search_rad = 5\n while found is False:\n center_y1 = int(init_coords[b][a][0])\n center_x1 = int(init_coords[b][a][1])\n min_SSD = 999999\n # for pythagoras to ensure closest block gets picked when equality occurs of SSD value\n min_d = 999999\n # this finds the center of the block to compare\n for factor_y in range(-search_rad, search_rad + 1):\n center_y2 = center_y1 + block_size*factor_y\n y_dist = center_y1 - abs(center_y2)\n for factor_x in range(-search_rad, search_rad + 1):\n center_x2 = center_x1 + 
block_size*factor_x\n x_dist = center_x1 - abs(center_x2)\n # pythagoras\n d = math.sqrt((y_dist**2 + x_dist**2))\n if d < min_d:\n min_d = d\n\n SSD = get_SSD()\n if frame2[center_y2][center_x2][1] != 0 and frame2[center_y2][center_x2][2] != 0:\n found = True\n if SSD < min_SSD:\n min_SSD = SSD\n new_y = center_y2\n new_x = center_x2\n elif SSD == min_SSD and d < min_d:\n new_y = center_y2\n new_x = center_x2\n if found is False:\n # if no block is found repeat the search, increasing the search size by 1\n search_rad += 1\n # draw extracted vectors\n cv2.arrowedLine(frame1k, (int(center_x1), int(center_y1)), (int(new_x), int(new_y)), (150, 200, 30), 1, 4, 0, 0.3)\n avg_x += new_x\n avg_y += new_y\n init_coords[b][a][0] = new_y\n init_coords[b][a][1] = new_x\n a += 1\n cv2.imwrite('monkeyFrames/contrast_enhanced%d.png' % frame_count, frame1k)\n limb_coords[b][frame_count][0] = int(avg_y/num_blocks)\n limb_coords[b][frame_count][1] = int(avg_x/num_blocks)\n b += 1", "def handle_sensor_data(data):\n\n #Store incoming data in the Data object\n D.data = data\n\n #Check for a bump\n if data.bumpRight or data.bumpLeft:\n print \"Bumped!\"\n\n\n #Check if play button was pressed\t\n if data.play:\n\tprint \"Stopping...\"\n\tStateMachine.state_stop()\n\trospy.signal_shutdown(\"play button pressed\")\n\n #Check key presses\n key_press = cv.WaitKey(5) & 255\n if key_press != 255:\n \tcheck_key_press(D, key_press)\t\n\n #Display robot updates in Monitor window\n draw_on_image(D)", "def message_handler(message):\n t = telemetry.Telemetry(message)\n for key, value in t.points.items():\n print()\n print('-point: ', key)\n print('-- point_name: ', value.point_name)\n print('-- present_value: ', value.present_value)\n message.ack()", "def update(self):\n while not rospy.is_shutdown():\n self.calculate_frame()\n for callback in self.callbacks:\n callback(self.keypoints, self.image)", "def OnTokenMotion(self, event):\n\n # if Playline is holded then ignore\n if self.myMainGUI.myPlayLine.isPlayLineHold:\n return\n\n # calculate x difference\n delta_x = event.x - self._drag_data[\"x\"]\n\n # Move all beats objects (lines and texts)\n for i in range(len(self.canvas_SG)):\n for it in self.canvas_SG[i].find_withtag('beattoken'):\n self.canvas_SG[i].move(it, delta_x, 0)\n\n # record the new position\n self._drag_data[\"x\"] = event.x", "def ev_controlleraxismotion(self, event: tcod.event.ControllerAxis) -> T | None:", "def realtime(self):", "def handle_function(self, data):\n # get the image data from the dictionary\n image = data['image']\n\n # decode the image back to its original form from a byte string\n image = cv2.imdecode(np.asarray(bytearray(image), dtype=np.uint8), 1)\n\n # use the global variable to get the steering angle using the controller\n global global_steer\n global_steer = self.controller.get_steering_angle(image, args.horizon)", "def lineFollowTillIntersectionRightPID(kp = 1.0, ki = 0, kd = 0, color = ColorSensor(INPUT_1), color2 = ColorSensor(INPUT_3), \n robot = MoveSteering(OUTPUT_A, OUTPUT_B)): # *an intersection is a line that is going through the line that the robot is following\n \n color.mode = 'COL-REFLECT' #setting color mode\n color2.mode = 'COL-REFLECT' #setting color mode\n lasterror = 0 \n sound = Sound()\n while color2.reflected_light_intensity <= Constants.WHITE and False == Constants.STOP:\n error = ((Constants.WHITE + Constants.BLACK)/2) - color.reflected_light_intensity # colorLeft.reflected_light_intensity - colorRight.reflected_light_intensity\n # correction = error * GAIN 
# correction = PID(error, lasterror, kp, ki, kd)\n correction = PIDMath(error=error, lasterror = lasterror, kp=kp, ki=ki, kd=kd)\n if correction > 100: correction = 100\n if correction < -100: correction = -100\n robot.on(speed = 20, steering = correction)\n lasterror = error\n sound.beep()\n robot.off()", "def lineFollowTillIntersectionPID(kp = 1.0, ki = 0, kd = 0, color = ColorSensor(INPUT_1), color2 = ColorSensor(INPUT_3), \n robot = MoveSteering(OUTPUT_A, OUTPUT_B)): # *an intersection is a line that is going through the line that the robot is following\n \n color.mode = 'COL-REFLECT' #setting color mode\n color2.mode = 'COL-REFLECT' #setting color mode\n lasterror = 0 \n sound = Sound()\n while color2.reflected_light_intensity <= Constants.WHITE and False == Constants.STOP:\n error = color.reflected_light_intensity - ((Constants.WHITE + Constants.BLACK)/2) # colorLeft.reflected_light_intensity - colorRight.reflected_light_intensity\n # correction = error * GAIN # correction = PID(error, lasterror, kp, ki, kd)\n correction = PIDMath(error=error, lasterror = lasterror, kp=kp, ki=ki, kd=kd)\n if correction > 100: correction = 100\n if correction < -100: correction = -100\n robot.on(speed = 20, steering = correction)\n lasterror = error\n sound.beep()\n robot.off()", "def _tag_pose_callback(self):\n for msg in self.pose_msgs:\n\n detections = msg.detections\n if (len(msg.detections)==0):\n continue\n\n exponential_coordinates = []\n translations = []\n for detection in detections:\n self._T_tag2cam = get_T(detection.pose.pose.pose)\n self._marker_num = detection.id\n current_header = detection.pose.header\n inter_pose = self._world_map[self._marker_num, :]\n inter_pose = np.squeeze(inter_pose)\n\n self._T_tag2world = get_tag2world(inter_pose)\n self._T = np.dot(self._T_tag2world, np.linalg.inv(self._T_tag2cam))\n\n T = np.dot(tf.transformations.inverse_matrix(self.Previous_T), self._T)\n angle, direc, point = tf.transformations.rotation_from_matrix(T)\n translation = tf.transformations.translation_from_matrix(T)\n\n exponential_coordinate = direc*angle\n o = tf.transformations.translation_from_matrix(self._T)\n\n if o[2] < 0.697 and o[0] < -0.9 and o[0] > -4 and o[1] < -0.8 and o[1] > -4:\n if self.Previous_time != None:\n time_interval = detection.pose.header.stamp.to_sec() - self.Previous_time\n angular_velocity = angle / time_interval\n translational_velocity = np.linalg.norm(translation) / time_interval\n\n if (np.abs(angular_velocity) < 0.9) and (translational_velocity < 3):\n exponential_coordinates.append(exponential_coordinate)\n translations.append(translation)\n else:\n exponential_coordinates.append(exponential_coordinate)\n translations.append(translation)\n\n if len(exponential_coordinates):\n exponential_coordinates = np.array(exponential_coordinates)\n exponential_coordinates = np.mean(exponential_coordinates, axis=0)\n\n translations = np.array(translations)\n translations = np.mean(translations, axis=0)\n\n angle = np.linalg.norm(exponential_coordinates)\n direc = exponential_coordinate / angle\n\n T = tf.transformations.rotation_matrix(angle, direc)\n T[:3, 3] = translations\n self._T = np.dot(self.Previous_T, T)\n\n q = tf.transformations.quaternion_from_matrix(self._T)\n o = tf.transformations.translation_from_matrix(self._T)\n if q[0] < 0:\n q = -q;\n\n self.poses.append(np.concatenate([q, o]))\n self.pose_times.append(msg.header.stamp.to_sec())\n\n self.Previous_T = self._T\n self.Previous_time = msg.header.stamp.to_sec()\n\n self.poses = np.array(self.poses)\n 
self.pose_times = np.array(self.pose_times)", "def runTask1(self):\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.initialize()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.sweepDuckie()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieAlignX()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieAlignY()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieLowerEE()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieSuctionOn()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieLiftEE()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.ppSweep()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.ppAlignX()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.ppAlignY()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.ppLowerEE()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.ppSuctionOff()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.reset()\r\n\t\tself._motion.terminate()", "def move_to_point_and_extract(coords_from_to: list,\n gps: adapters.GPSUbloxAdapter,\n vesc_engine: adapters.VescAdapterV4,\n smoothie: adapters.SmoothieAdapter,\n camera: adapters.CameraAdapterIMX219_170,\n periphery_det: detection.YoloOpenCVDetection,\n precise_det: detection.YoloOpenCVDetection,\n logger_full: utility.Logger,\n report_field_names,\n trajectory_saver: utility.TrajectorySaver,\n working_zone_polygon,\n img_output_dir,\n nav: navigation.GPSComputing,\n data_collector: datacollection.DataCollector,\n log_cur_dir,\n image_saver: utility.ImageSaver,\n notification: NotificationClient,\n extraction_manager_v3: ExtractionManagerV3,\n ui_msg_queue: posix_ipc.MessageQueue,\n SI_speed: float,\n wheels_straight: bool,\n navigation_prediction: navigation.NavigationPrediction,\n future_points: list,\n allow_extractions: bool,\n x_scan_poly: list,\n cur_field):\n\n if config.ALLOW_FIELD_LEAVING_PROTECTION and cur_field is not None and len(cur_field) > 2:\n enable_field_leaving_protection = True\n else:\n enable_field_leaving_protection = False\n if config.ALLOW_FIELD_LEAVING_PROTECTION:\n if cur_field is None:\n msg = f\"WARNING: robot field leaving protection WILL NOT WORK as given field is None\"\n print(msg)\n logger_full.write(msg)\n elif len(cur_field) < 3:\n msg = f\"WARNING: robot field leaving protection WILL NOT WORK as given field contains \" \\\n f\"{len(cur_field)} points (required ar least 3 points)\"\n print(msg)\n logger_full.write(msg)\n\n extract = SI_speed > 0 and allow_extractions\n\n vesc_speed = SI_speed * config.MULTIPLIER_SI_SPEED_TO_RPM\n speed_fast = config.SI_SPEED_FAST * config.MULTIPLIER_SI_SPEED_TO_RPM\n vesc_speed_fast = speed_fast if SI_speed >= 0 else -speed_fast\n navigation_prediction.set_SI_speed(SI_speed)\n\n raw_angles_history = []\n detections_period = []\n navigations_period = []\n stop_helping_point = nav.get_coordinate(\n coords_from_to[1], coords_from_to[0], 90, 1000)\n learn_go_straight_index = 0\n learn_go_straight_history = []\n\n last_skipped_point = coords_from_to[0]\n start_Nav_while = True\n last_correct_raw_angle = 0\n point_status = \"origin\"\n last_corridor_side = 0\n current_corridor_side = 1\n almost_start = 0\n\n prev_maneuver_time = time.time()\n working_mode_slow = 1\n working_mode_fast = 2\n working_mode_switching = 3\n current_working_mode = working_mode_slow\n last_working_mode = 0\n # True if robot is close to one of current movement vector points, False otherwise; False if 
speed limit near points is disabled\n close_to_end = config.USE_SPEED_LIMIT\n bumper_is_pressed = None\n\n # message queue sending temporary performance tracker\n if config.QUEUE_TRACK_PERFORMANCE:\n ui_msg_queue_perf = {\n \"max_time\": 0,\n \"min_time\": float(\"inf\"),\n \"total_time\": 0,\n \"total_sends\": 0,\n \"timeouts_exceeded\": 0\n }\n\n # x movements during periphery scans\n x_scan_cur_idx = 0\n x_scan_idx_increasing = True\n\n # set camera to the Y min\n res = smoothie.custom_separate_xy_move_to(X_F=config.X_F_MAX,\n Y_F=config.Y_F_MAX,\n X=smoothie.smoothie_to_mm(\n (config.X_MAX - config.X_MIN) / 2, \"X\"),\n Y=smoothie.smoothie_to_mm(config.Y_MIN, \"Y\"))\n if res != smoothie.RESPONSE_OK:\n msg = \"INIT: Failed to move camera to Y min, smoothie response:\\n\" + res\n logger_full.write(msg + \"\\n\")\n smoothie.wait_for_all_actions_done()\n\n # TODO: maybe should add sleep time as camera currently has delay\n\n if config.AUDIT_MODE:\n vesc_engine.set_target_rpm(vesc_speed, vesc_engine.PROPULSION_KEY)\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n\n try:\n notificationQueue = posix_ipc.MessageQueue(\n config.QUEUE_NAME_UI_NOTIFICATION)\n except KeyboardInterrupt:\n raise KeyboardInterrupt\n except:\n notificationQueue = None\n\n degraded_navigation_mode = False\n\n number_navigation_cycle_without_gps = 0\n\n point_reading_t = last_send_gps_time = slow_mode_time = time.time()\n\n have_time_for_inference = True\n predictor_next_gps_expected_ts = float(\"inf\")\n\n # main navigation control loop\n while True:\n # gps point reading time predictor\n if have_time_for_inference and config.ALLOW_GPS_TIME_PREDICTIONS_LIMITING_INFERENCE:\n if time.time() + config.INFERENCE_MAX_TICK_TIME > predictor_next_gps_expected_ts:\n have_time_for_inference = False\n\n if have_time_for_inference:\n # EXTRACTION CONTROL\n start_t = time.time()\n frame = camera.get_image()\n frame_t = time.time()\n\n per_det_start_t = time.time()\n if extract:\n plants_boxes = periphery_det.detect(frame)\n else:\n plants_boxes = list()\n per_det_end_t = time.time()\n detections_period.append(per_det_end_t - start_t)\n\n if config.SAVE_DEBUG_IMAGES:\n image_saver.save_image(\n frame,\n img_output_dir,\n label=\"PE_view_M=\" + str(current_working_mode),\n plants_boxes=plants_boxes)\n if config.ALLOW_GATHERING and current_working_mode == working_mode_slow and \\\n image_saver.get_counter(\"gathering\") < config.DATA_GATHERING_MAX_IMAGES:\n image_saver.save_image(frame, config.DATA_GATHERING_DIR,\n plants_boxes=plants_boxes, counter_key=\"gathering\")\n\n if extract:\n msg = \"View frame time: \" + str(frame_t - start_t) + \"\\t\\tPeri. det. time: \" + \\\n str(per_det_end_t - per_det_start_t)\n else:\n msg = \"View frame time: \" + str(frame_t - start_t) + \"\\t\\tPeri. det. 
(extractions are off) time: \" + \\\n str(per_det_end_t - per_det_start_t)\n logger_full.write(msg + \"\\n\")\n\n # MOVEMENT AND ACTIONS MODES\n if config.AUDIT_MODE:\n dc_start_t = time.time()\n\n # count detected plant boxes for each type\n plants_count = dict()\n for plant_box in plants_boxes:\n plant_box_name = plant_box.get_name()\n if plant_box_name in plants_count:\n plants_count[plant_box_name] += 1\n else:\n plants_count[plant_box_name] = 1\n\n # save info into data collector\n for plant_label in plants_count:\n data_collector.add_detections_data(plant_label,\n math.ceil((plants_count[plant_label]) / config.AUDIT_DIVIDER))\n\n # flush updates into the audit output file and log measured time\n if len(plants_boxes) > 0:\n data_collector.save_all_data(\n log_cur_dir + config.AUDIT_OUTPUT_FILE)\n\n dc_t = time.time() - dc_start_t\n msg = \"Last scan weeds detected: \" + str(len(plants_boxes)) + \\\n \", audit processing tick time: \" + str(dc_t)\n logger_full.write(msg + \"\\n\")\n else:\n # slow mode\n if current_working_mode == working_mode_slow:\n if last_working_mode != current_working_mode:\n last_working_mode = current_working_mode\n msg = \"[Working mode] : slow\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n\n if ExtractionManagerV3.any_plant_in_zone(\n plants_boxes,\n x_scan_poly[x_scan_cur_idx] if config.ALLOW_X_MOVEMENT_DURING_SCANS else working_zone_polygon):\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n if config.VERBOSE_EXTRACT:\n msg = \"[VERBOSE EXTRACT] Stopping the robot because we have detected plant(s).\"\n logger_full.write_and_flush(msg+\"\\n\")\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n # TODO this 0 rpm \"movement\" is to prevent robot movement during extractions, need to add this in future to rest speed modes too\n vesc_engine.set_time_to_move(config.VESC_MOVING_TIME, vesc_engine.PROPULSION_KEY)\n vesc_engine.set_target_rpm(0, vesc_engine.PROPULSION_KEY)\n vesc_engine.set_current_rpm(0, vesc_engine.PROPULSION_KEY)\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n\n # TODO remove thread init from here!\n voltage_thread = threading.Thread(\n target=send_voltage_thread_tf,\n args=(vesc_engine, ui_msg_queue),\n daemon=True)\n voltage_thread.start()\n\n # single precise center scan before calling for PDZ scanning and extractions\n if config.ALLOW_PRECISE_SINGLE_SCAN_BEFORE_PDZ and not config.ALLOW_X_MOVEMENT_DURING_SCANS:\n time.sleep(config.DELAY_BEFORE_2ND_SCAN)\n frame = camera.get_image()\n plants_boxes = precise_det.detect(frame)\n\n # do PDZ scan and extract all plants if single precise scan got plants in working area\n if ExtractionManagerV3.any_plant_in_zone(plants_boxes, working_zone_polygon):\n if config.EXTRACTION_MODE == 1:\n extraction_manager_v3.extract_all_plants()\n elif config.EXTRACTION_MODE == 2:\n extraction_manager_v3.mill_all_plants()\n slow_mode_time = time.time()\n else:\n if config.EXTRACTION_MODE == 1:\n extraction_manager_v3.extract_all_plants()\n elif config.EXTRACTION_MODE == 2:\n extraction_manager_v3.mill_all_plants()\n slow_mode_time = time.time()\n\n if config.VERBOSE_EXTRACT:\n msg = \"[VERBOSE EXTRACT] Extract cycle are finish.\"\n logger_full.write_and_flush(msg+\"\\n\")\n\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n\n msg = \"Applying force step forward after extractions cycle(s)\"\n logger_full.write(msg + \"\\n\")\n if config.VERBOSE:\n print(msg)\n 
vesc_engine.set_time_to_move(config.STEP_FORWARD_TIME, vesc_engine.PROPULSION_KEY)\n vesc_engine.set_target_rpm(\n config.SI_SPEED_STEP_FORWARD * config.MULTIPLIER_SI_SPEED_TO_RPM,\n vesc_engine.PROPULSION_KEY)\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n vesc_engine.wait_for_stop(vesc_engine.PROPULSION_KEY)\n\n elif config.SLOW_FAST_MODE and time.time() - slow_mode_time > config.SLOW_MODE_MIN_TIME:\n # move cork to fast mode scan position\n if config.VERBOSE:\n msg = \"SLOW MODE: moving cork to fast mode position\\n\"\n logger_full.write(msg)\n\n res = smoothie.custom_separate_xy_move_to(\n X_F=config.X_F_MAX,\n Y_F=config.Y_F_MAX,\n X=smoothie.smoothie_to_mm(\n (config.X_MAX - config.X_MIN) / 2, \"X\"),\n Y=smoothie.smoothie_to_mm((config.Y_MAX - config.Y_MIN) * config.SLOW_FAST_MODE_HEAD_FACTOR,\n \"Y\"))\n if res != smoothie.RESPONSE_OK:\n msg = \"INIT: Keeping in slow mode as failed to move camera to fast mode scan position, smoothie's response:\\n\" + res\n logger_full.write(msg + \"\\n\")\n else:\n msg = \"Switching from 'slow mode' to 'switching mode'\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n current_working_mode = working_mode_switching\n\n # TODO a bug: will not start moving if config.SLOW_MODE_MIN_TIME == 0 or too low (switch speed applies right after slow mode weeds extractions)\n if not vesc_engine.is_moving(vesc_engine.PROPULSION_KEY):\n vesc_engine.set_time_to_move(config.VESC_MOVING_TIME, vesc_engine.PROPULSION_KEY)\n vesc_engine.set_target_rpm(vesc_speed, vesc_engine.PROPULSION_KEY)\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n\n # switching (from slow to fast) mode\n elif current_working_mode == working_mode_switching:\n if last_working_mode != current_working_mode:\n last_working_mode = current_working_mode\n msg = \"[Working mode] : switching to fast\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n\n if ExtractionManagerV3.any_plant_in_zone(\n plants_boxes,\n x_scan_poly[x_scan_cur_idx] if config.ALLOW_X_MOVEMENT_DURING_SCANS else working_zone_polygon):\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n\n if config.VERBOSE:\n msg = \"Moving cork to slow mode scan position\\n\"\n logger_full.write(msg)\n\n # smoothie.wait_for_all_actions_done()\n res = smoothie.custom_separate_xy_move_to(\n X_F=config.X_F_MAX,\n Y_F=config.Y_F_MAX,\n X=smoothie.smoothie_to_mm(\n (config.X_MAX - config.X_MIN) / 2, \"X\"),\n Y=smoothie.smoothie_to_mm(config.Y_MIN, \"Y\"))\n if res != smoothie.RESPONSE_OK:\n msg = \"INIT: Failed to move camera to Y min, smoothie response:\\n\" + res\n logger_full.write(msg + \"\\n\")\n smoothie.wait_for_all_actions_done()\n\n current_working_mode = working_mode_slow\n slow_mode_time = time.time()\n vesc_engine.set_target_rpm(\n vesc_speed, vesc_engine.PROPULSION_KEY)\n continue\n\n sm_cur_pos = smoothie.get_smoothie_current_coordinates(\n convert_to_mms=False)\n if abs(sm_cur_pos[\"X\"] - (config.X_MAX - config.X_MIN) / 2) < 0.001 and \\\n abs(sm_cur_pos[\"Y\"] - (config.Y_MAX - config.Y_MIN) * config.SLOW_FAST_MODE_HEAD_FACTOR) < 0.001:\n msg = \"Switching from 'switching mode' to 'fast mode'\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n current_working_mode = working_mode_fast\n\n # fast mode\n elif current_working_mode == 
working_mode_fast:\n if last_working_mode != current_working_mode:\n last_working_mode = current_working_mode\n msg = \"[Working mode] : fast\"\n if config.LOG_SPEED_MODES:\n logger_full.write_and_flush(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n\n if ExtractionManagerV3.any_plant_in_zone(\n plants_boxes,\n x_scan_poly[x_scan_cur_idx] if config.ALLOW_X_MOVEMENT_DURING_SCANS else working_zone_polygon):\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n\n if config.VERBOSE:\n msg = \"Moving cork to slow mode scan position\\n\"\n logger_full.write(msg)\n\n # smoothie.wait_for_all_actions_done()\n res = smoothie.custom_separate_xy_move_to(\n X_F=config.X_F_MAX,\n Y_F=config.Y_F_MAX,\n X=smoothie.smoothie_to_mm(\n (config.X_MAX - config.X_MIN) / 2, \"X\"),\n Y=smoothie.smoothie_to_mm(config.Y_MIN, \"Y\"))\n if res != smoothie.RESPONSE_OK:\n msg = \"INIT: Failed to move camera to Y min, smoothie response:\\n\" + res\n logger_full.write(msg + \"\\n\")\n smoothie.wait_for_all_actions_done()\n\n msg = \"Switching from 'fast mode' to 'slow mode'\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n current_working_mode = working_mode_slow\n slow_mode_time = time.time()\n # TODO dont need anymore? as rpm is set at the end of slow mode\n # vesc_engine.set_rpm(vesc_speed, vesc_engine.PROPULSION_KEY)\n continue\n elif close_to_end:\n cur_vesc_rpm = vesc_engine.get_current_rpm(\n vesc_engine.PROPULSION_KEY)\n if cur_vesc_rpm != vesc_speed:\n msg = f\"Applying slow speed {vesc_speed} at 'fast mode' \" \\\n f\"(was {cur_vesc_rpm}) \" \\\n f\"because of close_to_end flag trigger\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n vesc_engine.set_target_rpm(\n vesc_speed, vesc_engine.PROPULSION_KEY)\n vesc_engine.set_current_rpm(\n vesc_speed, vesc_engine.PROPULSION_KEY)\n else:\n cur_vesc_rpm = vesc_engine.get_current_rpm(\n vesc_engine.PROPULSION_KEY)\n if cur_vesc_rpm != vesc_speed_fast:\n msg = f\"Applying fast speed {vesc_speed_fast} at 'fast mode' (was {cur_vesc_rpm})\"\n if config.LOG_SPEED_MODES:\n logger_full.write(msg + \"\\n\")\n if config.PRINT_SPEED_MODES:\n print(msg)\n vesc_engine.set_target_rpm(\n vesc_speed_fast, vesc_engine.PROPULSION_KEY)\n vesc_engine.set_current_rpm(\n vesc_speed_fast, vesc_engine.PROPULSION_KEY)\n\n # NAVIGATION CONTROL\n cur_pos_obj = gps.get_last_position_v2()\n cur_pos = cur_pos_obj.as_old_list\n\n nav_start_t = time.time()\n\n if start_Nav_while:\n navigation_period = 1\n else:\n navigation_period = nav_start_t - prev_maneuver_time\n\n navigations_period.append(navigation_period)\n # time reference to decide the number of detection before resuming gps.get\n prev_maneuver_time = nav_start_t\n # print(\"tock\")\n\n if start_Nav_while:\n prev_pos_obj = cur_pos_obj\n prev_pos = prev_pos_obj.as_old_list\n start_Nav_while = False\n\n # mu_navigations_period, sigma_navigations_period = utility.mu_sigma(navigations_period)\n\n navigation_prediction.set_current_lat_long(cur_pos)\n\n # skip same points (non-blocking reading returns old point if new point isn't available yet)\n if math.isclose(cur_pos_obj.creation_ts, prev_pos_obj.creation_ts):\n # stop robot if there's no new points for a while\n if time.time() - point_reading_t > config.GPS_POINT_TIME_BEFORE_STOP:\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n msg = 
f\"Stopping the robot due to exceeding time 'GPS_POINT_TIME_BEFORE_STOP=\" \\\n f\"{config.GPS_POINT_TIME_BEFORE_STOP}' limit without new gps points from adapter\"\n logger_full.write_and_flush(msg + \"\\n\")\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n\n gps_reconnect_ts = time.time()\n\n while True:\n cur_pos_obj = gps.get_last_position_v2()\n cur_pos = cur_pos_obj.as_old_list\n\n if math.isclose(cur_pos_obj.creation_ts, prev_pos_obj.creation_ts):\n # reconnect gps adapter to ublox if there's no gps points for a while\n if time.time() - gps_reconnect_ts > config.GPS_POINT_TIME_BEFORE_RECONNECT:\n gps.reconnect()\n gps_reconnect_ts = time.time()\n msg = \"Called GPS adapter to reconnect to ublox due to waiting too much for a new GPS \" \\\n \"point (new points filter)\"\n if config.VERBOSE:\n print(msg)\n logger_full.write_and_flush(msg + \"\\n\")\n else:\n msg = \"New GPS point received, continuing movement\"\n logger_full.write_and_flush(msg + \"\\n\")\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n break\n else:\n continue\n\n # gps points reading time predictor\n predictor_next_gps_expected_ts = cur_pos_obj.receiving_ts + config.GPS_POINT_WAIT_TIME_MAX\n have_time_for_inference = True\n\n # points filter by quality flag\n if cur_pos[2] != \"4\" and config.ALLOW_GPS_BAD_QUALITY_NTRIP_RESTART:\n # restart ntrip if enough time passed since the last ntrip restart\n navigation.NavigationV3.restart_ntrip_service(logger_full)\n\n # stop robot due to bad point quality if allowed\n if config.ALLOW_GPS_BAD_QUALITY_STOP:\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n logger_full.write_and_flush(\n \"Stopping the robot for lack of quality gps 4, waiting for it...\\n\")\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n\n prev_bad_quality_pos_obj = cur_pos_obj\n gps_reconnect_ts = time.time()\n\n while True:\n cur_pos_obj = gps.get_last_position_v2()\n cur_pos = cur_pos_obj.as_old_list\n\n # check if it's a new point\n if math.isclose(cur_pos_obj.creation_ts, prev_bad_quality_pos_obj.creation_ts):\n # reconnect gps adapter to ublox if there's no gps points for a while\n if time.time() - gps_reconnect_ts > config.GPS_POINT_TIME_BEFORE_RECONNECT:\n gps.reconnect()\n gps_reconnect_ts = time.time()\n msg = \"Called GPS adapter to reconnect to ublox due to waiting too much for a new \" \\\n \"GPS point (quality filter)\"\n if config.VERBOSE:\n print(msg)\n logger_full.write_and_flush(msg + \"\\n\")\n continue\n else:\n prev_bad_quality_pos_obj = cur_pos_obj\n\n # check if it's a good quality point\n if cur_pos[2] != \"4\":\n # restart ntrip if enough time passed since the last ntrip restart\n navigation.NavigationV3.restart_ntrip_service(\n logger_full)\n else:\n msg = \"The gps has regained quality 4, starting movement\"\n if config.VERBOSE:\n print(msg)\n logger_full.write_and_flush(msg + \"\\n\")\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n break\n\n # points filter by distance\n prev_cur_distance = nav.get_distance(prev_pos, cur_pos)\n if config.ALLOW_GPS_PREV_CUR_DIST_STOP and prev_cur_distance > config.PREV_CUR_POINT_MAX_DIST:\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n msg = f\"Stopping the robot due to GPS points filter by distance (assuming current position point \" \\\n f\"{str(cur_pos)} is wrong as distance between current position and prev. 
position {str(prev_pos)}\" \\\n f\" is bigger than config.PREV_CUR_POINT_MAX_DIST={str(config.PREV_CUR_POINT_MAX_DIST)})\"\n logger_full.write_and_flush(msg + \"\\n\")\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n\n prev_bad_quality_pos_obj = cur_pos_obj\n gps_reconnect_ts = distance_wait_start_ts = time.time()\n\n while True:\n if time.time() - distance_wait_start_ts > config.GPS_DIST_WAIT_TIME_MAX:\n msg = f\"Stopping waiting for good prev-cur distance due to timeout, using current point \" \\\n f\"{cur_pos} and starting moving again\"\n if config.VERBOSE:\n print(msg)\n logger_full.write_and_flush(msg + \"\\n\")\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n break\n\n cur_pos_obj = gps.get_last_position_v2()\n cur_pos = cur_pos_obj.as_old_list\n\n # check if it's a new point\n if math.isclose(cur_pos_obj.creation_ts, prev_bad_quality_pos_obj.creation_ts):\n # reconnect gps adapter to ublox if there's no gps points for a while\n if time.time() - gps_reconnect_ts > config.GPS_POINT_TIME_BEFORE_RECONNECT:\n gps.reconnect()\n gps_reconnect_ts = time.time()\n msg = \"Called GPS adapter to reconnect to ublox due to waiting too much for a new \" \\\n \"GPS point (distance filter)\"\n if config.VERBOSE:\n print(msg)\n logger_full.write_and_flush(msg + \"\\n\")\n continue\n else:\n prev_bad_quality_pos_obj = cur_pos_obj\n\n # check if it's a good quality point or ignore point quality if bad quality stop is not allowed\n if cur_pos[2] != \"4\" and config.ALLOW_GPS_BAD_QUALITY_NTRIP_RESTART:\n # restart ntrip if enough time passed since the last ntrip restart\n navigation.NavigationV3.restart_ntrip_service(logger_full)\n continue\n\n # check if distance became ok\n prev_cur_distance = nav.get_distance(prev_pos, cur_pos)\n if prev_cur_distance <= config.PREV_CUR_POINT_MAX_DIST:\n msg = f\"Starting moving again after GPS points filter by distance as distance become OK \" \\\n f\"({str(prev_cur_distance)})\"\n logger_full.write_and_flush(msg + \"\\n\")\n vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)\n break\n\n point_reading_t = time.time()\n\n trajectory_saver.save_point(cur_pos)\n if ui_msg_queue is not None and time.time()-last_send_gps_time >= 1:\n try:\n ui_msg_queue_send_ts = time.time()\n ui_msg_queue.send(json.dumps(\n {\"last_gps\": cur_pos}), timeout=config.QUEUE_WAIT_TIME_MAX)\n last_send_gps_time = time.time()\n\n if config.QUEUE_TRACK_PERFORMANCE:\n ui_msg_queue_send_et = last_send_gps_time - ui_msg_queue_send_ts\n if ui_msg_queue_send_et < ui_msg_queue_perf[\"min_time\"]:\n ui_msg_queue_perf[\"min_time\"] = ui_msg_queue_send_et\n if ui_msg_queue_send_et > ui_msg_queue_perf[\"max_time\"]:\n ui_msg_queue_perf[\"max_time\"] = ui_msg_queue_send_et\n ui_msg_queue_perf[\"total_time\"] += ui_msg_queue_send_et\n ui_msg_queue_perf[\"total_sends\"] += 1\n except posix_ipc.BusyError:\n msg = f\"Current position wasn't sent to ui_msg_queue likely due to sending timeout \" \\\n f\"(max wait time: config.QUEUE_WAIT_TIME_MAX={config.QUEUE_WAIT_TIME_MAX}\"\n logger_full.write(msg + \"\\n\")\n\n if config.QUEUE_TRACK_PERFORMANCE:\n ui_msg_queue_perf[\"timeouts_exceeded\"] += 1\n\n if config.CONTINUOUS_INFORMATION_SENDING and not degraded_navigation_mode:\n notification.set_current_coordinate(cur_pos)\n\n distance = nav.get_distance(cur_pos, coords_from_to[1])\n\n last_corridor_side = current_corridor_side\n perpendicular, current_corridor_side = nav.get_deviation(\n coords_from_to[0], coords_from_to[1], cur_pos)\n\n # 
stop the robot if it has left the field\n if enable_field_leaving_protection:\n for pt_idx in range(len(cur_field)):\n last_point = pt_idx + 1 == len(cur_field)\n\n if last_point:\n deviation, side = nav.get_deviation(cur_field[pt_idx], cur_field[0], cur_pos)\n else:\n deviation, side = nav.get_deviation(cur_field[pt_idx], cur_field[pt_idx + 1], cur_pos)\n\n if side == -1 and deviation > config.LEAVING_PROTECTION_DISTANCE_MAX:\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n msg = f\"Robot is stopped due to leaving the field. Cur pos: '{str(cur_pos)}'; \" \\\n f\"Field comparison vector - P1: '{str(cur_field[pt_idx])}', \" \\\n f\"P2: '{str(cur_field[0] if last_point else cur_field[pt_idx + 1])}'\"\n print(msg)\n logger_full.write_and_flush(msg + \"\\n\")\n notification.set_robot_state(RobotStates.OUT_OF_SERVICE)\n exit()\n\n # check if arrived\n _, side = nav.get_deviation(\n coords_from_to[1], stop_helping_point, cur_pos)\n # if distance <= config.COURSE_DESTINATION_DIFF: # old way\n if side != 1: # TODO: maybe should use both side and distance checking methods at once\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n # msg = \"Arrived (allowed destination distance difference \" + str(config.COURSE_DESTINATION_DIFF) + \" mm)\"\n # TODO: service will reload script even if it done his work?\n msg = \"Arrived to \" + str(coords_from_to[1])\n # print(msg)\n logger_full.write(msg + \"\\n\")\n\n # put the wheel straight\n if wheels_straight:\n response = smoothie.custom_move_to(A_F=config.A_F_MAX, A=0)\n if response != smoothie.RESPONSE_OK: # TODO: what if response is not ok?\n msg = \"Couldn't turn wheels to center (0), smoothie response:\\n\" + \\\n response\n print(msg)\n logger_full.write(msg + \"\\n\")\n else:\n # save wheels angle\n with open(config.LAST_ANGLE_WHEELS_FILE, \"w+\") as wheels_angle_file:\n wheels_angle_file.write(\n str(smoothie.get_adapter_current_coordinates()[\"A\"]))\n break\n\n # TODO check for bug: arrival check applies single speed for all path (while multiple speeds are applied)\n # check if can arrived\n if vesc_engine.get_current_rpm(vesc_engine.PROPULSION_KEY) / config.MULTIPLIER_SI_SPEED_TO_RPM * \\\n config.MANEUVERS_FREQUENCY > nav.get_distance(cur_pos, coords_from_to[1]):\n vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)\n data_collector.add_vesc_moving_time_data(\n vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))\n msg = \"Will have arrived before the next point to \" + \\\n str(coords_from_to[1])\n # print(msg)\n logger_full.write(msg + \"\\n\")\n\n break\n\n # reduce speed if near the target point\n if config.USE_SPEED_LIMIT:\n distance_from_start = nav.get_distance(coords_from_to[0], cur_pos)\n close_to_end = distance < config.DECREASE_SPEED_TRESHOLD or distance_from_start < config.DECREASE_SPEED_TRESHOLD\n\n msg = \"Distance to B: \" + str(distance)\n # print(msg)\n logger_full.write(msg + \"\\n\")\n\n msg = \"Prev: \" + str(prev_pos) + \" Cur: \" + str(cur_pos) + \" A: \" + str(coords_from_to[0]) \\\n + \" B: \" + str(coords_from_to[1])\n # print(msg)\n logger_full.write(msg + \"\\n\")\n\n # pass by cur points which are very close to prev point to prevent angle errors when robot is staying\n # (too close points in the same position can produce false huge angles)\n\n 
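The wheel-angle correction computed in the code that follows is in effect a proportional-integral term on the heading error: the newest raw angle is scaled by KP, a sliding window of recent raw angles is summed and scaled by KI, and the windowed sum is clamped to ±SUM_ANGLES_HISTORY_MAX so the integral part cannot wind up while the robot sits on one side of the course. A standalone sketch of that idea, with placeholder gains and limits standing in for the config constants (the project also scales KP/KI with the current speed via getSpeedDependentConfigParam):

from collections import deque

# Placeholder tuning values; the real KP, KI, window size and clamp come
# from the project's config and are speed-dependent.
KP = 0.2
KI = 0.0092
WINDOW = 10
SUM_ANGLES_MAX = 120.0

angle_history = deque(maxlen=WINDOW)   # sliding window of raw heading errors

def steering_correction(raw_angle):
    # One navigation tick: returns the P+I correction that would then be
    # converted into a wheel-angle order for the steering actuator.
    angle_history.append(raw_angle)
    sum_angles = sum(angle_history)
    # Anti-windup: clamp the windowed sum and take the excess out of the
    # newest sample, so the sum can recover as soon as the error flips sign.
    if sum_angles > SUM_ANGLES_MAX:
        angle_history[-1] -= sum_angles - SUM_ANGLES_MAX
        sum_angles = SUM_ANGLES_MAX
    elif sum_angles < -SUM_ANGLES_MAX:
        angle_history[-1] += -sum_angles - SUM_ANGLES_MAX
        sum_angles = -SUM_ANGLES_MAX
    return raw_angle * KP + sum_angles * KI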
navigation_prediction.run_prediction(coords_from_to, cur_pos)\n\n # raw_angle_cruise = nav.get_angle(coords_from_to[0], cur_pos, cur_pos, coords_from_to[1])\n # raw_angle_legacy = nav.get_angle(prev_pos, cur_pos, cur_pos, coords_from_to[1])\n raw_angle_centroid = nav.get_angle(\n prev_pos, cur_pos, coords_from_to[0], coords_from_to[1])\n raw_angle_cruise = - current_corridor_side * math.log(1+perpendicular)\n\n if nav.get_distance(coords_from_to[0], coords_from_to[1]) < config.CORNER_THRESHOLD and nav.get_distance(coords_from_to[1], future_points[0][0]) < config.CORNER_THRESHOLD:\n # if abs(raw_angle_legacy)>config.LOST_THRESHOLD:\n centroid_factor = config.CENTROID_FACTOR_LOST\n cruise_factor = 1/centroid_factor\n else:\n centroid_factor = config.CENTROID_FACTOR_ORIENTED\n cruise_factor = 1\n\n raw_angle = raw_angle_centroid*centroid_factor + raw_angle_cruise*cruise_factor\n\n # raw_angle = butter_lowpass_filter(raw_angle, 0.5, 4, 6)\n\n if config.LEARN_GO_STRAIGHT:\n if config.MIN_PERPENDICULAR_GO_STRAIGHT >= perpendicular:\n learn_go_straight_index += 1\n learn_go_straight_history.append(raw_angle)\n if len(learn_go_straight_history) >= config.VALUES_LEARN_GO_STRAIGHT:\n learn_go_straight = sum(\n learn_go_straight_history)/len(learn_go_straight_history)\n msg = f\"Average angle applied to the wheel for the robot to have found : {learn_go_straight}.\"\n logger_full.write_and_flush(msg + \"\\n\")\n # TODO opening and closing file 4 times per second\n with open(config.LEARN_GO_STRAIGHT_FILE, \"w+\") as learn_go_straight_file:\n learn_go_straight_file.write(str(learn_go_straight))\n else:\n learn_go_straight_index = 0\n\n # NAVIGATION STATE MACHINE\n if prev_cur_distance < config.PREV_CUR_POINT_MIN_DIST:\n raw_angle = last_correct_raw_angle\n # print(\"The distance covered is low\")\n point_status = \"skipped\"\n\n # register the last position where the robot almost stop\n # in order to disable the deviation servo for a config.POURSUIT_LIMIT length and then resume in cruise\n last_skipped_point = cur_pos\n else:\n last_correct_raw_angle = raw_angle\n point_status = \"correct\"\n\n almost_start = nav.get_distance(last_skipped_point, cur_pos)\n\n # sum(e)\n if len(raw_angles_history) >= config.WINDOW:\n raw_angles_history.pop(0)\n raw_angles_history.append(raw_angle)\n # print(\"len(raw_angles_history):\",len(raw_angles_history))\n sum_angles = sum(raw_angles_history)\n if sum_angles > config.SUM_ANGLES_HISTORY_MAX:\n msg = \"Sum angles \" + str(sum_angles) + \" is bigger than max allowed value \" + \\\n str(config.SUM_ANGLES_HISTORY_MAX) + \", setting to \" + \\\n str(config.SUM_ANGLES_HISTORY_MAX)\n # print(msg)\n logger_full.write(msg + \"\\n\")\n # Get Ready to go down as soon as the angle get negatif\n raw_angles_history[len(raw_angles_history) -\n 1] -= sum_angles - config.SUM_ANGLES_HISTORY_MAX\n sum_angles = config.SUM_ANGLES_HISTORY_MAX\n elif sum_angles < -config.SUM_ANGLES_HISTORY_MAX:\n msg = \"Sum angles \" + str(sum_angles) + \" is less than min allowed value \" + \\\n str(-config.SUM_ANGLES_HISTORY_MAX) + \", setting to \" + \\\n str(-config.SUM_ANGLES_HISTORY_MAX)\n # print(msg)\n logger_full.write(msg + \"\\n\")\n # get Ready to go up as soon as the angle get positive:\n raw_angles_history[len(raw_angles_history)-1] += - \\\n sum_angles - config.SUM_ANGLES_HISTORY_MAX\n sum_angles = -config.SUM_ANGLES_HISTORY_MAX\n\n # KP = 0.2*0,55\n # KI = 0.0092*0,91\n\n KP = getSpeedDependentConfigParam(\n config.KP, SI_speed, \"KP\", logger_full)\n KI = getSpeedDependentConfigParam(\n 
config.KI, SI_speed, \"KI\", logger_full)\n\n angle_kp_ki = raw_angle * KP + sum_angles * KI\n\n # smoothie -Value == left, Value == right\n target_angle_sm = angle_kp_ki * -config.A_ONE_DEGREE_IN_SMOOTHIE\n # target_angle_sm = 0 #Debug COVID_PLACE\n ad_wheels_pos = smoothie.get_adapter_current_coordinates()[\"A\"]\n # sm_wheels_pos = smoothie.get_smoothie_current_coordinates()[\"A\"]\n sm_wheels_pos = \"off\"\n\n # compute order angle (smoothie can't turn for huge values immediately also as cancel movement,\n # so we need to do nav. actions in steps)\n order_angle_sm = target_angle_sm - ad_wheels_pos\n\n # check for out of update frequency and smoothie execution speed range (for nav wheels)\n if order_angle_sm > config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND * \\\n config.A_ONE_DEGREE_IN_SMOOTHIE:\n msg = \"Order angle changed from \" + str(order_angle_sm) + \" to \" + str(\n config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND +\n config.A_ONE_DEGREE_IN_SMOOTHIE) + \" due to exceeding degrees per tick allowed range.\"\n # print(msg)\n logger_full.write(msg + \"\\n\")\n order_angle_sm = config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND * \\\n config.A_ONE_DEGREE_IN_SMOOTHIE\n elif order_angle_sm < -(config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND *\n config.A_ONE_DEGREE_IN_SMOOTHIE):\n msg = \"Order angle changed from \" + str(order_angle_sm) + \" to \" + str(-(\n config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND *\n config.A_ONE_DEGREE_IN_SMOOTHIE)) + \" due to exceeding degrees per tick allowed range.\"\n # print(msg)\n logger_full.write(msg + \"\\n\")\n order_angle_sm = -(config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND *\n config.A_ONE_DEGREE_IN_SMOOTHIE)\n\n # convert to global smoothie coordinates\n order_angle_sm += ad_wheels_pos\n\n # checking for out of smoothie supported range\n if order_angle_sm > config.A_MAX:\n msg = \"Global order angle changed from \" + str(order_angle_sm) + \" to config.A_MAX = \" + \\\n str(config.A_MAX) + \\\n \" due to exceeding smoothie allowed values range.\"\n # print(msg)\n logger_full.write(msg + \"\\n\")\n order_angle_sm = config.A_MAX\n elif order_angle_sm < config.A_MIN:\n msg = \"Global order angle changed from \" + str(order_angle_sm) + \" to config.A_MIN = \" + \\\n str(config.A_MIN) + \\\n \" due to exceeding smoothie allowed values range.\"\n # print(msg)\n logger_full.write(msg + \"\\n\")\n order_angle_sm = config.A_MIN\n\n # cork x movement during periphery scans control\n if config.ALLOW_X_MOVEMENT_DURING_SCANS:\n if x_scan_idx_increasing:\n x_scan_cur_idx += 1\n if x_scan_cur_idx >= len(config.X_MOVEMENT_CAMERA_POSITIONS):\n x_scan_idx_increasing = False\n x_scan_cur_idx -= 2\n else:\n x_scan_cur_idx -= 1\n if x_scan_cur_idx < 0:\n x_scan_idx_increasing = True\n x_scan_cur_idx += 2\n # TODO do we check SI_speed earlier and do proper calculations and angle validations if here we'll get here a negative order angle instead of positive?\n response = smoothie.custom_move_to(\n A_F=config.A_F_MAX,\n A=order_angle_sm if SI_speed >= 0 else -order_angle_sm,\n X_F=config.X_MOVEMENT_CAMERA_X_F[x_scan_cur_idx] if config.ALLOW_X_MOVEMENT_DURING_SCANS else None,\n X=config.X_MOVEMENT_CAMERA_POSITIONS[x_scan_cur_idx] if config.ALLOW_X_MOVEMENT_DURING_SCANS else None\n )\n\n if response != smoothie.RESPONSE_OK:\n msg = \"Couldn't turn wheels! 
Smoothie response:\\n\" + response\n print(msg)\n logger_full.write(msg + \"\\n\")\n else:\n # TODO opening and closing file too often (likely 4 times per second)\n # save wheels angle\n with open(config.LAST_ANGLE_WHEELS_FILE, \"w+\") as wheels_angle_file:\n wheels_angle_file.write(\n str(smoothie.get_adapter_current_coordinates()[\"A\"]))\n\n raw_angle = round(raw_angle, 2)\n angle_kp_ki = round(angle_kp_ki, 2)\n order_angle_sm = round(order_angle_sm, 2)\n sum_angles = round(sum_angles, 2)\n distance = round(distance, 2)\n ad_wheels_pos = round(ad_wheels_pos, 2)\n perpendicular = round(perpendicular, 2)\n # sm_wheels_pos = round(sm_wheels_pos, 2)\n gps_quality = cur_pos[2]\n corridor = \"\"\n if current_corridor_side == -1:\n corridor = \"left\"\n elif current_corridor_side == 1:\n corridor = \"right\"\n\n raw_angle_cruise = round(raw_angle_cruise, 2)\n\n msg = str(gps_quality).ljust(5) + \\\n str(raw_angle).ljust(8) + \\\n str(angle_kp_ki).ljust(8) + \\\n str(order_angle_sm).ljust(8) + \\\n str(sum_angles).ljust(8) + \\\n str(distance).ljust(13) + \\\n str(ad_wheels_pos).ljust(8) + \\\n str(sm_wheels_pos).ljust(9) + \\\n point_status.ljust(12) + \\\n str(perpendicular).ljust(10) + \\\n corridor.ljust(9) + \\\n str(centroid_factor).ljust(16) + \\\n str(cruise_factor).ljust(14)\n print(msg)\n logger_full.write(msg + \"\\n\")\n\n # TODO vesc sensors are being asked 4 times per second\n # send voltage and track bumper state\n vesc_data = vesc_engine.get_sensors_data(\n report_field_names, vesc_engine.PROPULSION_KEY)\n if vesc_data is not None and \"input_voltage\" in vesc_data:\n if bumper_is_pressed is None:\n bumper_is_pressed = not vesc_data[\"input_voltage\"] > config.VESC_BUMBER_UNTRIGGER_VOLTAGE\n if bumper_is_pressed:\n msg = f\"Bumper is pressed initially before starting moving to point. \" \\\n f\"({vesc_data['input_voltage']}V)\"\n logger_full.write(msg + \"\\n\")\n elif not bumper_is_pressed and vesc_data[\"input_voltage\"] < config.VESC_BUMBER_TRIGGER_VOLTAGE:\n bumper_is_pressed = True\n msg = f\"Bumper was pressed. ({vesc_data['input_voltage']}V)\"\n logger_full.write(msg + \"\\n\")\n elif bumper_is_pressed and vesc_data[\"input_voltage\"] > config.VESC_BUMBER_UNTRIGGER_VOLTAGE:\n bumper_is_pressed = False\n msg = f\"Bumper was unpressed. 
({vesc_data['input_voltage']}V)\"\n logger_full.write(msg + \"\\n\")\n\n if config.CONTINUOUS_INFORMATION_SENDING:\n notification.set_input_voltage(vesc_data[\"input_voltage\"])\n\n prev_pos_obj = cur_pos_obj\n prev_pos = prev_pos_obj.as_old_list\n\n msg = \"Nav calc time: \" + str(time.time() - nav_start_t)\n logger_full.write(msg + \"\\n\\n\")\n\n if config.QUEUE_TRACK_PERFORMANCE:\n ui_msg_queue_perf[\"avg_time\"] = ui_msg_queue_perf[\"total_time\"] / \\\n ui_msg_queue_perf[\"total_sends\"]\n msg = f\"Position sending performance report: {ui_msg_queue_perf}\"\n if config.VERBOSE:\n print(msg)\n logger_full.write(msg + \"\\n\")", "def _callback_meteo(self, msg):\n\n\t\tself.psi = self.north2east( msg.true_wind_direction )", "def __init__(self):\n rospy.init_node('square')\n rospy.Subscriber('/odom', Odometry, self.processOdom)\n self.pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)\n\n\n self.sleepy = rospy.Rate(2)\n\n # make dictionary that calls functions\n self.state = {'i':self.forward, ',':self.backward,\n 'l':self.rightTurn, 'j':self.leftTurn,\n 'k':self.stop}\n\n self.x = 0 # position in meters\n self.y = 0 # position in meters\n self.z = 0 # angle in degrees\n self.desiredX = 0\n self.desiredY = 0\n self.desiredZ = 0\n\n self.linearVector = Vector3(x=0.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)\n self.sendMessage()\n\n self.start = time()\n\n # get key interupt things\n self.settings = termios.tcgetattr(sys.stdin)\n self.key = None", "def callback(self, data):\n X = data.linear.x\n Z = data.angular.z\n commands = self.controller.Compute(X, Z)\n self.last_time = time.clock()\n self.m0 = commands[1]\n self.m1 = commands[0]", "def process(self):", "def process(self):", "def process(self):", "def process(x,memory):\r\n r = 0.01\r\n if x['name'] == 'accelerator_pedal_position':\r\n memory['acc'] = r*x['value'] + (1-r)*memory['acc']\r\n elif x['name'] == 'vehicle_speed':\r\n memory['speed'] = r*x['value'] + (1-r)*memory['speed']\r\n elif x['name'] == 'latitude':\r\n memory['latitude'] = x['value']\r\n elif x['name'] == 'longitude':\r\n memory['longitude'] = x['value']\r\n elif x['name'] == 'steering_wheel_angle':\r\n if len(memory['Th']) > 0:\r\n memory['dTh'].append(x['value'] - memory['Th'][-1])\r\n memory['Th'].append(x['value'])", "def get_current_ee_pose(self):\n #self.arm_endpoint = #magic tf call that I can add\n while True:\n try:\n translation, rotation = self.listener.lookupTransform('world_frame', 'palm_frame', rospy.Time()) #j2s7s300_end_effector\n break # once the transform is obtained move on\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n continue # if it fails try again\n point = [translation[0], translation[1], translation[2]]\n self.arm_endpoint = np.array(point)\n # rospy.logerr(self.arm_endpoint)", "def process():", "def run_tracker(p):\n # load model\n net = torch.load(os.path.join(p.net_base_path, p.net))\n net = net.to(device)\n\n # evaluation mode\n net.eval()\n\n # load sequence\n img_list, target_position, target_size = load_sequence(p.seq_base_path, p.video)\n\n # first frame\n img_uint8 = cv2.imread(img_list[0])\n img_uint8 = cv2.cvtColor(img_uint8, cv2.COLOR_BGR2RGB)\n img_double = np.double(img_uint8) # uint8 to float\n\n # compute avg for padding\n avg_chans = np.mean(img_double, axis=(0, 1))\n\n wc_z = target_size[1] + p.context_amount * sum(target_size)\n hc_z = target_size[0] + p.context_amount * sum(target_size)\n s_z = np.sqrt(wc_z * hc_z)\n scale_z = p.examplar_size / s_z\n\n # 
crop examplar z in the first frame\n z_crop = get_subwindow_tracking(img_double, target_position, p.examplar_size, round(s_z), avg_chans)\n\n z_crop = np.uint8(z_crop) # you need to convert it to uint8\n # convert image to tensor\n z_crop_tensor = 255.0 * F.to_tensor(z_crop).unsqueeze(0)\n\n d_search = (p.instance_size - p.examplar_size) / 2\n pad = d_search / scale_z\n s_x = s_z + 2 * pad\n # arbitrary scale saturation\n min_s_x = p.scale_min * s_x\n max_s_x = p.scale_max * s_x\n\n # generate cosine window\n if p.windowing == 'cosine':\n window = np.outer(np.hanning(p.score_size * p.response_UP), np.hanning(p.score_size * p.response_UP))\n elif p.windowing == 'uniform':\n window = np.ones((p.score_size * p.response_UP, p.score_size * p.response_UP))\n window = window / sum(sum(window))\n\n # pyramid scale search\n scales = p.scale_step**np.linspace(-np.ceil(p.num_scale/2), np.ceil(p.num_scale/2), p.num_scale)\n\n # extract feature for examplar z\n z_features = net.feat_extraction(Variable(z_crop_tensor).to(device))\n z_features = z_features.repeat(p.num_scale, 1, 1, 1)\n\n # do tracking\n bboxes = np.zeros((len(img_list), 4), dtype=np.double) # save tracking result\n start_time = datetime.datetime.now()\n for i in range(0, len(img_list)):\n if i > 0:\n # do detection\n # currently, we only consider RGB images for tracking\n img_uint8 = cv2.imread(img_list[i])\n img_uint8 = cv2.cvtColor(img_uint8, cv2.COLOR_BGR2RGB)\n img_double = np.double(img_uint8) # uint8 to float\n\n scaled_instance = s_x * scales\n scaled_target = np.zeros((2, scales.size), dtype = np.double)\n scaled_target[0, :] = target_size[0] * scales\n scaled_target[1, :] = target_size[1] * scales\n\n # extract scaled crops for search region x at previous target position\n x_crops = make_scale_pyramid(img_double, target_position, scaled_instance, p.instance_size, avg_chans, p)\n\n # get features of search regions\n x_crops_tensor = torch.FloatTensor(x_crops.shape[3], x_crops.shape[2], x_crops.shape[1], x_crops.shape[0])\n # response_map = SiameseNet.get_response_map(z_features, x_crops)\n for k in range(x_crops.shape[3]):\n tmp_x_crop = x_crops[:, :, :, k]\n tmp_x_crop = np.uint8(tmp_x_crop)\n # numpy array to tensor\n x_crops_tensor[k, :, :, :] = 255.0 * F.to_tensor(tmp_x_crop).unsqueeze(0)\n\n # get features of search regions\n x_features = net.feat_extraction(Variable(x_crops_tensor).to(device))\n\n # evaluate the offline-trained network for exemplar x features\n target_position, new_scale = tracker_eval(net, round(s_x), z_features, x_features, target_position, window, p)\n\n # scale damping and saturation\n s_x = max(min_s_x, min(max_s_x, (1 - p.scale_LR) * s_x + p.scale_LR * scaled_instance[int(new_scale)]))\n target_size = (1 - p.scale_LR) * target_size + p.scale_LR * np.array([scaled_target[0, int(new_scale)], scaled_target[1, int(new_scale)]])\n\n rect_position = np.array([target_position[1]-target_size[1]/2, target_position[0]-target_size[0]/2, target_size[1], target_size[0]])\n\n if p.visualization:\n visualize_tracking_result(img_uint8, rect_position, 1)\n\n # output bbox in the original frame coordinates\n o_target_position = target_position\n o_target_size = target_size\n bboxes[i,:] = np.array([o_target_position[1]-o_target_size[1]/2, o_target_position[0]-o_target_size[0]/2, o_target_size[1], o_target_size[0]])\n\n end_time = datetime.datetime.now()\n fps = len(img_list)/max(1.0, (end_time-start_time).seconds)\n\n return bboxes, fps", "def execute(self):\n self._odom_msg.header.stamp = rospy.Time.now()\n # query 
base state from robot and store in odom msg\n position, orientation, linear_velocity, angular_velocity = self._robot.get_base_state()\n [self._odom_msg.pose.pose.position.x,\n self._odom_msg.pose.pose.position.y,\n self._odom_msg.pose.pose.position.z] = position\n [self._odom_msg.pose.pose.orientation.x,\n self._odom_msg.pose.pose.orientation.y,\n self._odom_msg.pose.pose.orientation.z,\n self._odom_msg.pose.pose.orientation.w] = orientation\n [self._odom_msg.twist.twist.linear.x,\n self._odom_msg.twist.twist.linear.y,\n self._odom_msg.twist.twist.linear.z] = linear_velocity\n [self._odom_msg.twist.twist.angular.x,\n self._odom_msg.twist.twist.angular.y,\n self._odom_msg.twist.twist.angular.z] = angular_velocity\n self._publisher.publish(self._odom_msg)\n\n tf_msg = TransformStamped()\n tf_msg.header.frame_id = self._odom_msg.header.frame_id\n tf_msg.child_frame_id = self._odom_msg.child_frame_id\n tf_msg.transform.translation = self._odom_msg.pose.pose.position\n tf_msg.transform.rotation = self._odom_msg.pose.pose.orientation\n tf_msg.header.stamp = rospy.Time.now()\n self._br.sendTransform(tf_msg)", "def compute_trajectory():\n pass", "def process_odom(self, msg):\n if (self.last_odom != None and\n msg.south_to_north_position != self.last_odom.south_to_north_position):\n delta = msg.south_to_north_position - self.last_odom.south_to_north_position\n self.pf.predict(delta)\n self.last_odom = msg", "def waypoints_cb(self, msg):\n rospy.loginfo(rospy.get_name() + ': waypoints received')\n self.base_waypoints = msg.waypoints", "def _on_pose(self, msg):\n if self._history_length == Dashboard.POSE_MAX_TIMESTEPS:\n self._pose_history[:, :-1] = self._pose_history[:, 1:]\n else:\n self._history_length += 1\n\n self._pose_history[:, self._history_length-1] = [\n rospy.Time.now().to_time() % 1000,\n msg.x,\n msg.y,\n msg.theta,\n msg.linear_velocity,\n msg.angular_velocity,\n ]", "def test_vw_controller(self):\n pass\n\n yarp.Network.init()\n\n pose_stream = yarp.BufferedPortBottle()\n pose_stream.open(\"/morse/test/pose/in\")\n yarp.Network.connect(\"/morse/robots/ATRV/Pose/out\", \"/morse/test/pose/in\")\n\n cmd_stream = yarp.BufferedPortBottle()\n cmd_stream.open(\"/morse/test/vw/out\")\n yarp.Network.connect(\"/morse/test/vw/out\", \"/morse/robots/ATRV/Motion_Controller/in\")\n \n # Read the start position, it must be (0.0, 0.0, 0.0)\n pose = pose_stream.read()\n for i in range(6):\n self.assertAlmostEqual(pose.get(i).asDouble(), 0.0, delta=0.1)\n\n\n send_speed(cmd_stream, 1.0, 0.0, 2.0)\n\n pose = pose_stream.read()\n self.assertAlmostEqual(pose.get(0).asDouble(), 2.0, delta=0.1)\n self.assertAlmostEqual(pose.get(1).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(2).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(3).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(4).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(5).asDouble(), 0.0, delta=0.1)\n\n send_speed(cmd_stream, -1.0, 0.0, 2.0)\n\n pose = pose_stream.read()\n for i in range(6):\n self.assertAlmostEqual(pose.get(i).asDouble(), 0.0, delta=0.1)\n\n send_speed(cmd_stream, 1.0, -math.pi/4.0, 2.0)\n pose = pose_stream.read()\n self.assertAlmostEqual(pose.get(0).asDouble(), 4.0 / math.pi, delta=0.1)\n self.assertAlmostEqual(pose.get(1).asDouble(), -4.0 / math.pi , delta=0.1)\n self.assertAlmostEqual(pose.get(2).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(3).asDouble(), -math.pi/2.0, delta=0.1)\n self.assertAlmostEqual(pose.get(4).asDouble(), 0.0, delta=0.1)\n 
self.assertAlmostEqual(pose.get(5).asDouble(), 0.0, delta=0.1)\n\n send_speed(cmd_stream, 0.5, -math.pi/8.0, 12.0)\n pose = pose_stream.read()\n for i in range(6):\n self.assertAlmostEqual(pose.get(i).asDouble(), 0.0, delta=0.1)\n\n send_speed(cmd_stream, -2.0, math.pi/2.0, 3.0)\n pose = pose_stream.read()\n self.assertAlmostEqual(pose.get(0).asDouble(), 4.0 / math.pi, delta=0.1)\n self.assertAlmostEqual(pose.get(1).asDouble(), -4.0 / math.pi , delta=0.1)\n self.assertAlmostEqual(pose.get(2).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(3).asDouble(), -math.pi/2.0, delta=0.1)\n self.assertAlmostEqual(pose.get(4).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(5).asDouble(), 0.0, delta=0.1)\n\n yarp.Network.fini()", "def run_keypoint_detection(self, wait_for_result=True, move_to_stored_pose=True, clear_state=True):\n\n if clear_state:\n self._clear_cache()\n self.state.clear()\n\n\n\n if move_to_stored_pose:\n\n CMT = CategoryManipulationType\n q = self._stored_poses_director[\"General\"][\"home\"] # for mugs\n if MANIP_TYPE in [CMT.SHOE_ON_RACK, CMT.SHOE_ON_TABLE]:\n q = self._stored_poses_director['General']['center_back']\n else: # basically all mugs\n q = self._stored_poses_director[\"General\"][\"home\"]\n\n self.robotService.moveToJointPosition(q,\n maxJointDegreesPerSecond=self.graspingParams['speed']['fast'])\n\n rgbdWithPoseMsg = self.captureRgbdAndCameraTransform()\n self.state.cache['rgbd_with_pose_list'] = []\n self.state.cache['rgbd_with_pose_list'].append(rgbdWithPoseMsg)\n\n # request via a ROS Action\n rospy.loginfo(\"waiting for KeypointDetection server\")\n self.keypoint_detection_client.wait_for_server()\n rospy.loginfo(\"connected to KeypointDetection server\")\n\n goal = pdc_ros_msgs.msg.KeypointDetectionGoal()\n goal.rgbd_with_pose_list = self.state.cache['rgbd_with_pose_list']\n goal.camera_info = self.camera_info_subscriber.waitForNextMessage()\n\n if EXPERIMENT_MODE:\n goal.output_dir = \"mankey_experiments/%s\" %(spartanUtils.get_current_YYYY_MM_DD_hh_mm_ss())\n\n rospy.loginfo(\"requesting action from KeypointDetection server\")\n\n self.keypoint_detection_client.send_goal(goal)\n self.state.set_status(\"ABOVE_TABLE\")\n\n if wait_for_result:\n self.wait_for_keypoint_detection_result()", "def __init__(self):\n self._current = Pose() # initlize correctly #Pose() \n self._odom_list = tf.TransformListener()\n rospy.Timer(rospy.Duration(.1), self.timerCallback)\n self._vel_pub = rospy.Publisher('cmd_vel_mux/input/teleop',Twist, queue_size=1)\n rospy.Subscriber('/move_base_simple/goal2', PoseStamped, self.navToPose, queue_size=1) # handle nav goal events\n rospy.Subscriber('/mobile_base/events/bumper', BumperEvent, self.readBumper, queue_size=1) # handle bumper events", "def process_state_info(self, state):\n K3Supervisor.process_state_info(self,state)\n\n # The pose for controllers\n self.parameters.pose = self.pose_est\n \n # Distance to the goal\n self.distance_from_goal = sqrt((self.pose_est.x - self.parameters.goal.x)**2 + (self.pose_est.y - self.parameters.goal.y)**2)\n \n # Sensor readings in real units\n self.parameters.sensor_distances = self.get_ir_distances()", "def listen(self):\n\n if not self.key_data:\n self.key_data = {}\n for i in range(1024):\n self.key_data[i] = False\n\n if not self.axis_data:\n self.axis_data = {}\n for i in range(self.controller.get_numaxes()):\n self.axis_data[i] = 0.0\n\n if not self.button_data:\n self.button_data = {}\n for i in range(self.controller.get_numbuttons()):\n self.button_data[i] = 
False\n\n if not self.hat_data:\n self.hat_data = {}\n for i in range(self.controller.get_numhats()):\n self.hat_data[i] = (0, 0)\n\n debug_toggle = True\n print_state_toggle = True\n\n # These parameters define how frequesnt speed setting sent over serial to arduino\n speed_threshold = 10.0 # sets update threshold\n speed_step = 1 # sets acceleration\n speed_delay = 0.01 # delay per 1 step in sec\n\n mode_switch = \"j\" # control mode: k - keyboard, j - joystick\n\n # Parameters for keyboard control mode\n speed = 0.0\n speed_current = 0\n direction = \"r\" # r - release, f - forward, b - backward\n direction_current = \"r\"\n\n # Parameters for joystick control mode\n speed_l = 0\n speed_r = 0\n prev_speed_l = 0\n prev_speed_r = 0\n prev_btn = False\n\n while True:\n prev = self.axis_data\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n self.key_data[event.key] = True\n elif event.type == pygame.KEYUP:\n self.key_data[event.key] = False\n if event.type == pygame.JOYAXISMOTION:\n self.axis_data[event.axis] = round(event.value,2)\n elif event.type == pygame.JOYBUTTONDOWN:\n self.button_data[event.button] = True\n elif event.type == pygame.JOYBUTTONUP:\n self.button_data[event.button] = False\n elif event.type == pygame.JOYHATMOTION:\n self.hat_data[event.hat] = event.value\n\n # check for exit command\n if self.button_data[9] or self.key_data[pygame.QUIT] or self.key_data[pygame.K_ESCAPE]:\n pygame.quit()\n break\n\n # toggle debug\n if self.key_data[pygame.K_d]:\n if debug_toggle:\n print(\"Toggle debug\")\n self.ser.write(b'd')\n debug_toggle = False\n else:\n debug_toggle = True\n\n # print out motors status\n if self.key_data[pygame.K_p]:\n if print_state_toggle:\n self.ser.write(b'p')\n if self.ser.in_waiting:\n print (self.ser.readline())\n print_state_toggle = False\n else:\n print_state_toggle = True\n\n if self.key_data[pygame.K_1] and mode_switch != \"k\":\n mode_switch = \"k\"\n\n if self.key_data[pygame.K_2] and mode_switch != \"j\":\n print(\"Joystick mode: ON\")\n mode_switch = \"j\"\n\n if mode_switch == \"k\": # keyboard control mode\n # accelearte forward\n if self.key_data[pygame.K_a] and direction != \"r\":\n if speed < 255.0:\n speed = speed + speed_step\n sleep(speed_delay)\n # accelerate backward\n if self.key_data[pygame.K_z] and direction != \"r\":\n if speed > 0.0:\n speed = speed - speed_step\n sleep(speed_delay)\n\n if self.key_data[pygame.K_UP] and direction != \"f\":\n direction = \"f\"\n if self.key_data[pygame.K_DOWN] and direction != \"b\":\n direction = \"b\"\n if self.key_data[pygame.K_UP] == False and direction == \"f\":\n direction = \"r\"\n if self.key_data[pygame.K_DOWN] == False and direction == \"b\":\n direction = \"r\"\n\n if math.fabs(speed - speed_current) > speed_threshold or direction != direction_current:\n # print(\"{0}, {1}, {2}, {3}\".format(speed, speed_current, direction, direction_current))\n direction_current = direction\n if direction == \"r\":\n speed = 0.0\n speed_current = int(speed)\n str_r = \"sr\" + direction_current + str(speed_current) + \"e\"\n str_l = \"sl\" + direction_current + str(speed_current) + \"e\"\n print(str_l)\n print(str_r)\n self.ser.write(str_r.encode())\n self.ser.write(str_l.encode())\n\n if(self.key_data[pygame.K_LEFT]):\n str_rf = \"srf\" + str(speed_current) + \"e\"\n self.ser.write(str_rf.encode())\n str_lf = \"slf\" + str(int(speed_current*0.9)) + \"e\"\n self.ser.write(str_lf.encode())\n elif(self.key_data[pygame.K_RIGHT]):\n str_rb = \"srf\" + str(int(speed_current*0.9)) + \"e\"\n 
self.ser.write(str_rb.encode())\n str_lb = \"slf\" + str(speed_current) + \"e\"\n self.ser.write(str_lb.encode())\n\n if (self.key_data[pygame.K_UP] == False and self.key_data[pygame.K_DOWN] == False) and (self.key_data[pygame.K_a] == False and self.key_data[pygame.K_z] == False):\n speed = 0\n speed_current = speed\n direction = \"r\"\n direction_current = direction\n self.ser.write(b'srze')\n self.ser.write(b'slze')\n\n if mode_switch == \"j\": # joystick control mode\n if self.ser.in_waiting:\n data = str(self.ser.readline().strip())\n data = data[2 :len(data)-1]\n print(data)\n #self.aio.send('Team Hacky Slackers', data)\n\n prev_speed_l = speed_l\n prev_speed_r = speed_r\n speed_threshold = 1\n\n #simplified linear mapping for controller\n speed_l = int((self.axis_data[0]*(-50)) + 90)\n speed_r = int(math.fabs(self.axis_data[3]*255))\n #print(self.axis_data)\n #print(\"curr_l: {0}, perv_l: {1}, curr_r:{2}, perv_r:{3}\".format(speed_l, prev_speed_l, speed_r,prev_speed_r))\n\n if self.axis_data[0] < -0.05 and math.fabs(speed_l - prev_speed_l) > speed_threshold:\n str_lf = \"slf\" + str(speed_l) + \"e\"\n self.ser.write(str_lf.encode())\n elif self.axis_data[0] > 0.05 and math.fabs(speed_l - prev_speed_l) > speed_threshold:\n str_lb = \"slb\" + str(speed_l) + \"e\"\n self.ser.write(str_lb.encode())\n\n\n if self.axis_data[3] < -0.03 and math.fabs(speed_r - prev_speed_r) > speed_threshold:\n str_rf = \"srf\" + str(speed_r) + \"e\"\n self.ser.write(str_rf.encode())\n elif self.axis_data[3] > 0.03 and math.fabs(speed_r - prev_speed_r) > speed_threshold:\n str_rb = \"srb\" + str(speed_r) + \"e\"\n self.ser.write(str_rb.encode())\n\n if ( self.axis_data[0] >= -0.05 and self.axis_data[0] <= 0.05 ) and ( self.axis_data[3] >= -0.05 and self.axis_data[3] <= 0.05 ):\n speed_l = 90\n speed_r = 0\n self.ser.write(b'srze')\n self.ser.write(b'slze')\n\n #Logic to call RFID scan only once per click of R1 button\n # if(prev_btn != self.button_data[5]):\n # prev_btn = self.button_data[5]\n # if self.button_data[5] :\n # print(\"Scanning for RFID Card\")\n # self.ser.write(\"i\".encode())\n\n # clear()\n # pprint.pprint(self.button_data)\n # pprint.pprint(self.axis_data)\n # pprint.pprint(self.hat_data)", "def updateState(self):\n\t\t# ask for current pose data\n\t\tcomm.write(b'id1 mav.pose_sensor get_local_data \\n')\n\t\t# update x value\n\t\tcomm.read_until(b'\"x\": ') # b'' as Telnet needs a bytes object instead of string since Python3\n\t\tread = comm.read_until(b',') # returns read values + finishing ','\n\t\tread = read[:-1] # cut that ','\n\t\tcurrent_state.x = float(read)\n\t\tself.state_x_label.set_text(\"%0.2f\" % current_state.x)\n\t\t# update y value\n\t\tcomm.read_until(b'\"y\": ')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.y = float(read)\n\t\tself.state_y_label.set_text(\"%0.2f\" % current_state.y)\n\t\t# update z value\n\t\tcomm.read_until(b'\"z\": ')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.z = float(read)\n\t\tself.state_z_label.set_text(\"%0.2f\" % current_state.z)\n\t\t# update yaw value\n\t\tcomm.read_until(b'\"yaw\": ')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.psi = float(read)\n\t\tself.state_psi_label.set_text(\"%0.2f\" % current_state.psi)\n\t\t# update pitch value\n\t\tcomm.read_until(b'\"pitch\": ')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.theta = float(read)\n\t\tself.state_theta_label.set_text(\"%0.2f\" % current_state.theta)\n\t\t# update 
roll value\n\t\tcomm.read_until(b'\"roll\": ')\n\t\tread = comm.read_until(b'}')\n\t\tread = read[:-1]\n\t\tcurrent_state.phi = float(read)\n\t\tself.state_phi_label.set_text(\"%0.2f\" % current_state.phi)\n\n\t\t# ask for current velocity data\n\t\tcomm.write(b'id1 mav.velocity_sensor get_local_data \\n')\n\t\t# update p value\n\t\tcomm.read_until(b'\"angular_velocity\": [')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.p = float(read)\n\t\tself.state_p_label.set_text(\"%0.2f\" % current_state.p)\n\t\t# update q value\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.q = float(read)\n\t\tself.state_q_label.set_text(\"%0.2f\" % current_state.q)\n\t\t# update r value\n\t\tread = comm.read_until(b']')\n\t\tread = read[:-1]\n\t\tcurrent_state.r = float(read)\n\t\tself.state_r_label.set_text(\"%0.2f\" % current_state.r)\n\n\t\t# update x_dot value\n\t\tcomm.read_until(b'\"world_linear_velocity\": [')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.x_dot = float(read)\n\t\tself.state_x_dot_label.set_text(\"%0.2f\" % current_state.x_dot)\n\t\t# update y_dot value\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.y_dot = float(read)\n\t\tself.state_y_dot_label.set_text(\"%0.2f\" % current_state.y_dot)\n\t\t# update z_dot value\n\t\tread = comm.read_until(b']')\n\t\tread = read[:-1]\n\t\tcurrent_state.z_dot = float(read)\n\t\tself.state_z_dot_label.set_text(\"%0.2f\" % current_state.z_dot)\n\n\t\t# update first waypoint for trajectory in GUI\n\t\twaypoints_gui[0] = [current_state.x, current_state.y, current_state.z, current_state.psi]\n\n\t\treturn GLib.SOURCE_CONTINUE", "def listen(self):\n\n if not self.axis_data:\n self.axis_data = {}\n\n if not self.button_data:\n self.button_data = {}\n for i in range(self.controller.get_numbuttons()):\n self.button_data[i] = False\n\n if not self.hat_data:\n self.hat_data = {}\n for i in range(self.controller.get_numhats()):\n self.hat_data[i] = (0, 0)\n\n for event in pygame.event.get():\n if event.type == pygame.JOYAXISMOTION:\n self.axis_data[event.axis] = round(event.value,2)\n elif event.type == pygame.JOYBUTTONDOWN:\n self.button_data[event.button] = True\n elif event.type == pygame.JOYBUTTONUP:\n self.button_data[event.button] = False\n elif event.type == pygame.JOYHATMOTION:\n self.hat_data[event.hat] = event.value\n\n axis=self.axis_data\n\n if 0 in axis:\n self.x=axis[0]\n self.y=-axis[1]\n\n # Turbo\n if self.button_data[7]:\n self.x*=2\n self.y*=2\n # Start Camera\n if self.button_data[3]:\n subprocess.Popen([\"firefox\",otraip+\"/html\"],\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE)\n return \"camera\"\n\n # Measure\n if self.button_data[1]:\n return \"measure\"\n\n # Exit\n if self.button_data[2]:\n return \"exit\"\n return \"move \"+str(self.x)+\" \"+str(self.y)+\"\\n\"", "def test_process_metadata_0(self):\n data = ET.parse(\"data/metadata_0.xml\")\n data_str = ET.tostring(data.getroot())\n\n pre = tesse_ros_bridge.enu_T_unity\n post = tesse_ros_bridge.brh_T_blh\n\n dict = tesse_ros_bridge.utils.parse_metadata(data_str)\n proc = tesse_ros_bridge.utils.process_metadata(dict, dict['time']-2,\n [0,0,0], np.identity(3))\n\n transform = proc['transform']\n transform_R = transform[:3,:3]\n transform_t = transform[:3,3]\n\n # First check the transformation matrix.\n # Right-handed check.\n self.assertEqual(np.linalg.det(transform_R), 1)\n # X and Z axes are switched:\n self.assertEqual(transform_t[0], 
dict['position'][0])\n self.assertEqual(transform_t[1], dict['position'][2])\n self.assertEqual(transform_t[2], dict['position'][1])\n\n truth_quat = tf.transformations.quaternion_from_matrix((pre.dot(\n post)).dot(tf.transformations.quaternion_matrix(\n dict['quaternion'])))\n self.assertTrue(np.allclose(proc['quaternion'], truth_quat))\n\n self.assertTrue(np.allclose(proc['velocity'],\n post[:3,:3].dot(dict['velocity'])))\n\n # TODO(marcus): this is not correct.\n self.assertTrue(np.allclose(proc['ang_vel'],\n post[:3,:3].dot(dict['ang_vel'])))\n\n # print dict['ang_vel']\n\n self.assertTrue(np.allclose(proc['acceleration'], proc['velocity']*0.5))\n\n self.assertEqual(proc['time'], dict['time'])\n self.assertEqual(proc['collision_status'], dict['collision_status'])", "def motion_F(input_line, cur, count):\n return start_catching_keys(1, \"cb_motion_F\", input_line, cur, count)", "def publish_moved_distance(self):\n rospy.spin()\n \n \n \n # spin() simply keeps python from exiting until this node is stopped\n rospy.spin()", "def handle_sensor_data(data):\n \n #print dir( data )\n D.data = data\n\n #Check for a bump\n if data.bumpRight or data.bumpLeft:\n print \"Bumped!\"\n\n\n #Check if play button was pressed\t\n if data.play:\n\tprint \"Play button pressed!\"\n\tStateMachine.state_stop()\n\trospy.signal_shutdown(\"play button pressed\")", "def processOdom(self, msg):\n self.x = msg.pose.pose.position.x\n self.y = msg.pose.pose.position.y\n self.z = 180 * (msg.pose.pose.orientation.z % 2)\n print(self.z)", "async def listen(self):\n \n print(\"start\")\n await asyncio.sleep(1)\n for event in pygame.event.get():\n print(\"Start2\")\n if event.type == pygame.JOYAXISMOTION:\n self.axis_data[event.axis] = round(event.value,2)\n elif event.type == pygame.JOYBUTTONDOWN:\n self.button_data[event.button] = True\n elif event.type == pygame.JOYBUTTONUP:\n self.button_data[event.button] = False\n elif event.type == pygame.JOYHATMOTION:\n self.hat_data[event.hat] = event.value\n\n # Insert your code on what you would like to happen for each event here!\n # In the current setup, I have the state simply printing out to the screen.\n \n #os.system('clear')\n #pprint.pprint(self.button_data)\n try:\n if self.axis_data[0] < -0.2:\n #left()\n time.sleep(0.1)\n self.isRoutating = True\n \n elif self.axis_data[0] >0.2:\n #right()\n time.sleep(0.1)\n self.isRoutating = True\n\n else:\n if self.isRoutating is True:\n #stop()\n self.isRoutating = False\n\n\n if self.axis_data[4] > 0:\n if self.isMoving == 1:\n self.isMoving = 0\n #stop()\n time.sleep(0.05)\n continue\n #down()\n self.isMoving = -1\n \n elif self.axis_data[5] > 0:\n if self.isMoving == -1:\n self.isMoving = 0\n #stop()\n time.sleep(0.05)\n continue\n #up()\n self.isMoving = 1\n\n else:\n if self.isMoving == 1 or self.isMoving == -1:\n #stop()\n self.isMoving = 0\n \n #pprint.pprint(self.hat_data)\n except Exception as e:\n #print(e)\n pass", "def ev_joyaxismotion(self, event: tcod.event.JoystickAxis) -> T | None:", "def odometry_callback(self, msg):\n if not bool(self.config):\n return\n\n linear = msg.twist.twist.linear\n angular = msg.twist.twist.angular\n v_linear = numpy.array([linear.x, linear.y, linear.z])\n v_angular = numpy.array([angular.x, angular.y, angular.z])\n\n if self.config['odom_vel_in_world']:\n # This is a temp. 
workaround for gazebo's pos3d plugin not behaving properly:\n # Twist should be provided wrt child_frame, gazebo provides it wrt world frame\n # see http://docs.ros.org/api/nav_msgs/html/msg/Odometry.html\n xyzw_array = lambda o: numpy.array([o.x, o.y, o.z, o.w])\n q_wb = xyzw_array(msg.pose.pose.orientation)\n R_bw = transf.quaternion_matrix(q_wb)[0:3, 0:3].transpose()\n\n v_linear = R_bw.dot(v_linear)\n v_angular = R_bw.dot(v_angular)\n \n # Compute compute control output:\n t = time_in_float_sec_from_msg(msg.header.stamp)\n \n e_v_linear = (self.v_linear_des - v_linear)\n e_v_angular = (self.v_angular_des - v_angular)\n \n a_linear = self.pid_linear.regulate(e_v_linear, t)\n a_angular = self.pid_angular.regulate(e_v_angular, t)\n\n # Convert and publish accel. command:\n cmd_accel = geometry_msgs.Accel()\n cmd_accel.linear = geometry_msgs.Vector3(x=a_linear[0], y=a_linear[1], z=a_linear[2])\n cmd_accel.angular = geometry_msgs.Vector3(x=a_angular[0], y=a_angular[1], z=a_angular[2])\n self.pub_cmd_accel.publish(cmd_accel)", "def run(self):\n while self.is_connected():\n self.__ticker.tick() # Tick (sleep)\n\n if self.process and self.process.is_alive():\n self.update()\n continue\n\n c = getkey() \n if c:\n if c == 'w':\n print \"Moving forward\"\n self.add_property(\"name\", \"pioneer_command\")\n self.add_property(\"pioneer_command\", \"forward\")\n elif c == 'a':\n print \"Turning left\"\n self.add_property(\"name\", \"pioneer_command\")\n self.add_property(\"pioneer_command\", \"left\")\n elif c == 'd':\n print \"Turning right\"\n self.add_property(\"name\", \"pioneer_command\")\n self.add_property(\"pioneer_command\", \"right\")\n elif c == 's':\n print \"Moving backward\"\n self.add_property(\"name\", \"pioneer_command\")\n self.add_property(\"pioneer_command\", \"backward\")\n elif c == 'q':\n self.add_property(\"name\", \"pioneer_command\")\n self.add_property(\"pioneer_command\", \"stop\")\n elif c == 'r':\n self.add_property(\"name\", \"pioneer_command\")\n self.add_property(\"pioneer_command\", \"terminate\")\n elif c == 't':\n self.add_property(\"name\", \"map_command\")\n self.add_property(\"map_command\", \"make_scan\")\n elif c == 'm':\n self.add_property(\"name\", \"map_command\")\n self.add_property(\"map_command\", \"save_map\")\n elif c == 'l':\n self.add_property(\"name\", \"map_command\")\n self.add_property(\"map_command\", \"load_map\")\n elif c == 'c':\n self.add_property(\"name\", \"map_command\")\n self.add_property(\"map_command\", \"match_map\")\n elif c == 'x':\n self.add_property(\"name\", \"map_command\")\n self.add_property(\"map_command\", \"make_map\")\n elif c == 'z':\n self.add_property(\"name\", \"map_command\")\n self.add_property(\"map_command\", \"discard_map\")\n elif c == 'p':\n self.save_pose()\n elif c == 'g':\n self.add_property(\"name\", \"map_command\")\n self.add_property(\"map_command\", \"toggle_autoscan\")\n elif c == 'h':\n print \"[w] = forward [a] = left [s] = backward [d] = right\"\n print \"[q] = stop [t] = take scan [c] = matching mode [x] = mapping mode\"\n print \"[m] = save map [l] = load map [p] = save current pose [g] = Toggle autoscanning\"\n \n ############################\n # Send data\n self.update()", "def currentstate_callback(self, odom):\n self.CurrentPosition = np.array([odom.pose.pose.position.x, odom.pose.pose.position.y, odom.pose.pose.position.z])\n self.CurrentVelocity = np.array([odom.twist.twist.linear.x, odom.twist.twist.linear.y, odom.twist.twist.linear.z])", "def refSpeed_callback(self, msg):\n 
self.mutex.acquire()\n\n self.speed_ref[0] = msg.vx\n self.speed_ref[1] = msg.vy\n self.speed_ref[2] = msg.vz\n\n self.mutex.release()\n rospy.loginfo(\"%s receive speed reference\", self.node_name)", "def pose_callback(msg):\n\t#Print the values of the x,y,theta of the Turtle:\n rospy.loginfo(\"x: %.11f, y: %.11f, theta: %.11f \", msg.x, msg.y, msg.theta)", "def data_collection():\n global PAUSED\n print(\"Detecting nodes\")\n while True:\n data = SOCK.recvfrom(1024)[0] # buffer size is 1024 bytes\n message = data.decode()\n try:\n message_function = message[0]\n message = message[1:]\n \n if message_function == \"t\":\n loc, temp, hum = message.split(\", \")\n temp = (float(temp) * 1.8) + 32 # convert from C to F\n\n # Checks if location is alreay in the rolling_X dictionarys. If not, it creates an entry\n # in the dictionary and populates it with the defaults\n if loc not in ROLLING_TEMPS:\n ROLLING_TEMPS[loc] = copy(TEMPDEQUEDEFAULT)\n print(loc, \"has connected\")\n if loc not in ROLLING_HUMS:\n ROLLING_HUMS[loc] = copy(HUMDEQUEDEFAULT)\n\n # Append new temp and humidity to appropriate deque in dictionaries\n ROLLING_TEMPS[loc].appendleft(temp)\n ROLLING_HUMS[loc].appendleft(hum)\n LAST_RECEIVED[loc] = datetime.datetime.utcnow()\n \n elif message_function == \"c\":\n if message == \"pause\":\n PAUSED = True\n print(\"pausing\")\n elif message == \"unpause\":\n PAUSED = False\n print(\"unpausing\")\n else:\n print(\"unknown command function\")\n elif message_function == \"i\":\n if message == \"status\":\n print(\"Paused:\", PAUSED)\n else:\n print(\"unknown info function\")\n except:\n print(\"malformed data\")", "def main():\n\n # Initialize ROS node\n # rospy.init_node('dope')\n dopenode = DopeNode()\n image_path = \\\n \"/media/aditya/A69AFABA9AFA85D9/Cruzr/code/Dataset_Synthesizer/Test/Zed/NewMap1_turbosquid_can_only/000000.left.png\"\n # image_path = \\\n # \"/media/aditya/A69AFABA9AFA85D9/Cruzr/code/Dataset_Synthesizer/Test/Zed/NewMap1_dope/000001.left.png\"\n camera_ns = rospy.get_param('camera', 'dope/webcam')\n info_manager = CameraInfoManager(cname='dope_webcam_{}'.format(0),\n namespace=camera_ns)\n try:\n camera_info_url = rospy.get_param('~camera_info_url')\n if not info_manager.setURL(camera_info_url):\n rospy.logwarn('Camera info URL invalid: %s', camera_info_url)\n except KeyError:\n # we don't have a camera_info_url, so we'll keep the\n # default ('file://${ROS_HOME}/camera_info/${NAME}.yaml')\n pass\n info_manager.loadCameraInfo()\n if not info_manager.isCalibrated():\n rospy.logwarn('Camera is not calibrated, please supply a valid camera_info_url parameter!')\n camera_info = info_manager.getCameraInfo()\n dopenode.run_on_image(image_path, camera_info)\n \n # try:\n # rospy.spin()\n # except rospy.ROSInterruptException:\n # pass", "def __init__(self, maneuver_velocity_setpoint,\n maneuver_reference_frame,\n maneuver_duration):\n\n # Create node with name 'translation_controller' and set update rate\n rospy.init_node('translation_controller')\n\n # A publisher which will publish the desired linear and anglar velocity to the topic '/.../cmd_vel_unstamped'\n self.vel_setpoint_pub = rospy.Publisher('/mavros/setpoint_velocity/cmd_vel_unstamped', Twist, queue_size = 1)\n self.vel_setpoint_bu_lenu__lenu = Twist()\n\n # A subscriber to the topic '/mavros/state'. 
self.state is called when a message of type 'State' is recieved\n self.state_sub = rospy.Subscriber(\"/mavros/state\", State, self.state_cb)\n self.current_state = State()\n self.prev_state = State()\n\n # A subscriber to the /mavros/local_position/pose topic that is used to access the transform between the body-up\n # and local ENU frames\n self.pose_sub = rospy.Subscriber('/mavros/local_position/pose', PoseStamped, self.pose_sub_cb)\n self.q_bu_lenu = None\n\n self.rate = rospy.Rate(Constants.RATE)\n self.offboard_point_streaming = False\n self.static_transforms = StaticTransforms()\n self.maneuver_velocity_setpoint = maneuver_velocity_setpoint\n self.maneuver_reference_frame = maneuver_reference_frame\n self.maneuver_duration = maneuver_duration", "def read(self, data):\n\n self.log('Received NMEA data:', data, lvl=debug)\n # self.log(data, pretty=True)\n\n # TODO: A sentence might contain multiple values, those should be mapped\n # to the fields attribute accordingly.\n\n final_data = {\n 'measurement': data.type,\n 'time': data.timestamp,\n 'coordinate': self.current_position,\n 'fields': {\n data.value\n }\n }\n\n self.log('Recording final data:', final_data, pretty=True, lvl=debug)\n self.client.write_points(final_data)", "def on_message(client, userdata, msg):\n\n message = loads(msg.payload)[0]\n\n if \"sequence\" in message:\n sequence = message['sequence']\n else:\n print('Sequence no. not available.')\n sequence = 1 \n \n if sequence > 0:\n try:\n # store the received data in powerflow_results\n powerflow_results = villas_node_interface.receiveVillasNodeInput(system, message, input_mapping_vector)\n\n # read measurements from file\n measurements_set = measurement.MeasurementSet()\n\n if sequence < 90:\n measurements_set.read_measurements_from_file(powerflow_results, meas_configfile1)\n scenario_flag = 1\n else:\n measurements_set.read_measurements_from_file(powerflow_results, meas_configfile2)\n scenario_flag = 2\n\n # calculate the measured values (affected by uncertainty)\n measurements_set.meas_creation(dist=\"uniform\", seed=sequence)\n # Performs state estimation\n state_estimation_results = nv_state_estimator.DsseCall(system, measurements_set)\n\n # send results to message broker\n villasOutput = villas_node_interface.sendVillasNodeOutput(message, output_mapping_vector, powerflow_results,\n state_estimation_results, scenario_flag)\n client.publish(topic_publish, villasOutput, 0)\n\n # Finished message\n print(\"Finished state estimation for sequence \" + str(sequence))\n\n except Exception as e:\n print(e)\n traceback.print_tb(e.__traceback__)\n sys.exit()", "def ev_joyballmotion(self, event: tcod.event.JoystickBall) -> T | None:", "def __init__(self):\n rospy.init_node(\"kinect_transformer\")\n self.kinect_depth_sub = rospy.Subscriber(\"kinect/depth/points\", pc2.PointCloud2, self.kinect_cb, queue_size=10)\n self.left_obs_pub = rospy.Publisher(\"left_arm_obstacles\", PointCloud, queue_size=10, latch=True)\n self.right_obs_pub = rospy.Publisher(\"right_arm_obstacles\", PointCloud, queue_size=10, latch=True)\n self.tf = tf.TransformListener()\n self.closest_rgb_points = []\n # create collision checkers with the left and right kin solver instances\n self.left_cc = CollisionChecker([], KDLIKSolver(\"left\"))\n self.right_cc = CollisionChecker([], KDLIKSolver(\"right\"))", "def collect(self):\n self.prev_position = {'lefthand':kinect.get_coord('lefthand'), \n 'righthand':kinect.get_coord('righthand')}\n \n while True:\n for hand in 'lefthand','righthand':\n position = 
kinect.get_coord(hand)\n displacement = vector.Subtract(position, self.prev_position[hand])\n acceleration = vector.Distance(displacement, self.prev_displacement[hand])\n self.prev_position[hand] = position\n self.prev_displacement[hand] = displacement\n # We square the acceleration so the variance is exaggerated\n self.accelerations[hand].append(acceleration**2) \n \n # Limit to latest self.sample_limit samples\n self.accelerations[hand] = self.accelerations[hand][-self.sample_limit:] \n self.velocities[hand].append(vector.Magnitude(displacement))\n self.velocities[hand] = self.velocities[hand][-self.sample_limit:]\n time.sleep(self.rate)", "def analyze_video(vidNum_iter, config, pointInds_toUse, pts_spaced, session): # function needed for multiprocessing\n\n optic = config['Optic']\n\n numVids = session['num_vids']\n path_vid_allFiles = session['videos']\n lk_names = [key for key in optic.keys() if 'lk_' in key]\n lk_params = {k.split('lk_')[1]: (tuple(optic[k]) if type(optic[k]) is list else optic[k]) \\\n for k in lk_names}\n\n vid = imageio.get_reader(path_vid_allFiles[vidNum_iter], 'ffmpeg')\n # metadata = vid.get_meta_data()\n\n path_vid = path_vid_allFiles[vidNum_iter] # get path of the current vid\n video = cv2.VideoCapture(path_vid) # open the video object with openCV\n numFrames = int(video.get(\n cv2.CAP_PROP_FRAME_COUNT)) # get frame count of this vid GENERALLY INACCURATE. OFF BY AROUND -25 frames\n\n frameToSet = 0\n frame = vid.get_data(\n frameToSet) # Get a single frame to use as the first 'previous frame' in calculating optic flow\n new_frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n old_frame = new_frame_gray\n\n displacements_tmp = np.zeros((pts_spaced.shape[0], 2, np.uint64(numFrames + (numVids * 1000)))) * np.nan\n\n print(' ', end='', flush=True)\n text = \"progresser #{}\".format(vidNum_iter)\n print(f'\\n Calculating displacement field: video # {vidNum_iter + 1}/{numVids}')\n\n for iter_frame, new_frame in enumerate(tqdm(vid, total=numFrames, desc=text, position=vidNum_iter)):\n new_frame_gray = cv2.cvtColor(new_frame, cv2.COLOR_BGR2GRAY) # convert to grayscale\n\n ##calculate optical flow\n pointInds_new, status, error = cv2.calcOpticalFlowPyrLK(old_frame, new_frame_gray, pointInds_toUse, None,\n **lk_params) # Calculate displacement distance between STATIC/ANCHORED points and the calculated new points. Also note the excluded 'NextPts' parameter. Could be used for fancier tracking\n\n ## Calculate displacement and place into variable 'displacements' (changes in size every iter)\n if iter_frame == 0:\n displacements_tmp[:, :, iter_frame] = np.zeros((pts_spaced.shape[0], 2))\n else:\n displacements_tmp[:, :, iter_frame] = np.single(np.squeeze((\n pointInds_new - pointInds_toUse))) # this is the important variable. Simply the difference in the estimate\n\n old_frame = new_frame_gray # make current frame the 'old_frame' for the next iteration\n\n return displacements_tmp", "def keyboard_control(args_, pygame_clock, world_, client_, runtime_bucket,\n moment_data, saved_idx, global_actor_list):\n # the rgb camera and seg camera are in the args\n # since we may need to change the fov for them\n # by rebuild the camera actor\n\n # return True to exit\n ms_since_last_tick = pygame_clock.get_time()\n # TOdo: change the following. 
dont get the transform at each time to avoid\n # stuttering due to the frame lag\n # set a global variable of the rotation instead\n prev_rotation = args_.spectator.get_transform().rotation\n prev_location = args_.spectator.get_transform().location\n global_up_vector = carla.Vector3D(x=0, z=1, y=0)\n # a normalized x,y,z, between 0~1\n forward_vector = prev_rotation.get_forward_vector()\n left_vector = cross(forward_vector, global_up_vector)\n global_forward_vector = cross(global_up_vector, left_vector)\n\n # get all event from event queue\n # empty out the event queue\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return True\n # click a point to add traj or add new actor\n elif event.type == pygame.MOUSEBUTTONUP:\n if runtime_bucket[\"playing_moment\"]:\n continue\n # get the clicking xyz first\n # get the depth map in meters\n depth_in_meters = parse_carla_depth(args_.depth_camera.rgb_image)\n pos_x, pos_y = pygame.mouse.get_pos()\n click_point = np.array([pos_x, pos_y, 1])\n\n intrinsic = args_.rgb_camera.camera_actor.intrinsic\n # 3d point in the camera coordinates\n click_point_3d = np.dot(np.linalg.inv(intrinsic), click_point)\n click_point_3d *= depth_in_meters[pos_y, pos_x]\n # why? this is because unreal transform is (y, -z , x)???\n y, z, x = click_point_3d\n z = -z\n click_point_3d = np.array([x, y, z, 1])\n click_point_3d.reshape([4, 1])\n\n # transform to the world origin\n camera_rt = compute_extrinsic_from_transform(\n args_.rgb_camera.camera_actor.get_transform())\n click_point_world_3d = np.dot(camera_rt, click_point_3d)\n x, y, z = click_point_world_3d.tolist()[0][:3] # since it is np.matrix\n xyz = [x, y, z + 0.1] # slightly above ground\n\n if runtime_bucket[\"waiting_for_click\"]:\n runtime_bucket[\"waiting_for_click\"] = False\n # adding a new actor at the click location\n # or add a new destination for the x_agent\n\n new_actor_type = runtime_bucket[\"new_actor_type\"]\n\n if new_actor_type == \"destination\":\n # check whether the currently selected guy is x_agent\n p_id = runtime_bucket[\"selected\"]\n if p_id in moment_data[runtime_bucket[\"cur_moment_idx\"]][\"x_agents\"]:\n # check whether the destination make sense\n last_obs = runtime_bucket[\n \"person_data\"][runtime_bucket[\"selected\"]][args.obs_length-1]\n last_obs_speed = last_obs[\"speed\"]\n\n # meter / second\n # pred_length is 26 timestep, 10.4 seconds\n max_dest_dist = last_obs_speed * \\\n (args.pred_length / args.annotation_fps)\n\n last_obs_xyz = last_obs[\"xyz\"]\n diff = [(xyz[i] - last_obs_xyz[i])**2 for i in range(2)] # ignore z\n diff = math.sqrt(sum(diff))\n if diff > max_dest_dist:\n print(\"Destination too far away. Dist to last obs: %s\" % diff)\n else:\n this_moment_data = moment_data[runtime_bucket[\"cur_moment_idx\"]]\n this_moment_data[\"x_agents\"][p_id].append(xyz)\n print(\"Set destination #%s for person #%s. 
Dist to last obs: %s\" \\\n % (len(this_moment_data[\"x_agents\"][p_id]), p_id, diff))\n\n else:\n new_p_id = runtime_bucket[\"new_p_id\"]\n new_frame_id = 0 # new actor always start from first frame?\n new_control_point = [new_p_id, new_frame_id, xyz, None, None, None,\n False]\n\n add_new_control_point(\n moment_data, new_control_point, new_frame_id,\n runtime_bucket,\n is_vehicle=runtime_bucket[\"new_actor_type\"] == \"vehicle\")\n\n # select the new guy\n runtime_bucket[\"selected\"] = new_p_id\n runtime_bucket[\"is_vehicle\"] = \\\n runtime_bucket[\"new_actor_type\"] == \"vehicle\"\n\n runtime_bucket[\"new_p_id\"] += 1\n\n else:\n # check whether there is a selected actor first\n all_actors = sorted(runtime_bucket[\"person_data\"].keys()) + \\\n sorted(runtime_bucket[\"vehicle_data\"].keys())\n if runtime_bucket[\"selected\"] is not None and \\\n (runtime_bucket[\"selected\"] in all_actors):\n p_id = runtime_bucket[\"selected\"]\n if not runtime_bucket[\"is_vehicle\"]:\n traj_data = runtime_bucket[\"person_data\"][p_id]\n else:\n traj_data = runtime_bucket[\"vehicle_data\"][p_id]\n\n # the current last frame id\n last_frame_id = traj_data[-1][\"frame_id\"]\n new_frame_id = last_frame_id + args.frame_skip\n\n change_type = \"ped_controls\"\n if runtime_bucket[\"is_vehicle\"]:\n change_type = \"vehicle_controls\"\n\n # modify the moment_data\n # a new control point meaning, we need to recompute the\n # direction vector for the last timestep, then add the new point\n # 1. recompute last control point\n prev_controls = \\\n moment_data[runtime_bucket[\"cur_moment_idx\"]][change_type]\n last_frame_id_str = \"%s\" % last_frame_id # json's fault\n # find the control point idx to change\n last_control_point_idx = None\n for i, one in enumerate(prev_controls[last_frame_id_str]):\n if one[0] == p_id:\n last_control_point_idx = i\n break\n if last_control_point_idx is not None:\n direction_vector, speed, time_elasped = get_direction_and_speed(\n [new_frame_id, p_id] + xyz,\n [last_frame_id, p_id] + traj_data[-1][\"xyz\"],\n args.video_fps)\n _, ori_frame_id, _, _, _, _, is_stationary = \\\n prev_controls[last_frame_id_str][last_control_point_idx]\n moment_data[runtime_bucket[\"cur_moment_idx\"]][change_type][\n last_frame_id_str][last_control_point_idx] = \\\n [p_id, ori_frame_id, traj_data[-1][\"xyz\"],\n direction_vector, speed, time_elasped, is_stationary]\n\n # 2. 
add the new point\n new_control_point = [p_id, new_frame_id, xyz, None,\n None, None, False]\n add_new_control_point(\n moment_data, new_control_point, new_frame_id,\n runtime_bucket,\n is_vehicle=runtime_bucket[\"is_vehicle\"])\n\n\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_r:\n # reset rotation\n args_.spectator.set_transform(carla.Transform(\n rotation=carla.Rotation(pitch=0.0,\n yaw=0.0,\n roll=0.0),\n location=prev_location))\n elif event.key == pygame.K_t:\n # print out the camera transform\n print(args_.spectator.get_transform())\n print(\"camera FOV: %s\" % \\\n args_.rgb_camera.camera_actor.attributes[\"fov\"])\n # an ugly way to change the camera fov\n elif (event.key == pygame.K_n) or (event.key == pygame.K_m):\n if event.key == pygame.K_n:\n new_fov = args_.prev_camera_fov - 5.0\n else:\n new_fov = args_.prev_camera_fov + 5.0\n new_fov = new_fov if new_fov >= 5.0 else 5.0\n new_fov = new_fov if new_fov <= 175.0 else 175.0\n\n set_camera_fov(args_, client_, new_fov)\n\n\n # ------------ selecting moments\n elif (event.key == pygame.K_LEFTBRACKET) or \\\n (event.key == pygame.K_RIGHTBRACKET):\n if runtime_bucket[\"playing_moment\"]:\n continue\n get_idx = 1\n if event.key == pygame.K_LEFTBRACKET:\n get_idx = -1\n cur_moment_idx = runtime_bucket[\"cur_moment_idx\"]\n cur_moment_idx += get_idx\n if cur_moment_idx >= len(moment_data):\n cur_moment_idx = 0\n elif cur_moment_idx < 0:\n cur_moment_idx = len(moment_data) - 1\n\n person_data, vehicle_data = init_moment(\n world_, client_, moment_data, cur_moment_idx,\n global_actor_list)\n runtime_bucket[\"person_data\"] = person_data\n runtime_bucket[\"vehicle_data\"] = vehicle_data\n runtime_bucket[\"selected\"] = sorted(person_data.keys())[0]\n runtime_bucket[\"is_vehicle\"] = False\n runtime_bucket[\"cur_moment_idx\"] = cur_moment_idx\n\n # cleanup static actors\n all_actors = list(runtime_bucket[\"static_peds\"].values()) + \\\n list(runtime_bucket[\"static_vehicles\"].values())\n client.apply_batch(\n [carla.command.DestroyActor(x) for x in all_actors])\n runtime_bucket[\"static_peds\"] = {}\n runtime_bucket[\"static_vehicles\"] = {}\n runtime_bucket[\"static_spawned\"] = False\n\n # toggle saving this moment or not\n elif event.key == pygame.K_p:\n cur_moment_idx = runtime_bucket[\"cur_moment_idx\"]\n #if saved_idxs.has_key(cur_moment_idx):\n if cur_moment_idx in saved_idxs:\n del saved_idxs[cur_moment_idx]\n else:\n saved_idxs[cur_moment_idx] = 1\n # toggle saving all moment or not\n elif event.key == pygame.K_o:\n # saving all\n if len(saved_idxs) < len(moment_data):\n for i in range(len(moment_data)):\n saved_idxs[i] = 1\n else:\n cur_saved_idxs = list(saved_idxs.keys())\n for i in cur_saved_idxs:\n del saved_idxs[i]\n elif event.key == pygame.K_v:\n # go the current scene's anchor camera setting\n scene = moment_data[runtime_bucket[\"cur_moment_idx\"]][\"scenename\"]\n camera_location_preset = anchor_cameras[scene][0]\n args_.spectator.set_transform(camera_location_preset)\n set_camera_fov(args_, client_, anchor_cameras[scene][1])\n # copy the current moment to the end (so it does not affect saved_idxs)\n elif event.key == pygame.K_l:\n cur_moment = moment_data[runtime_bucket[\"cur_moment_idx\"]].copy()\n moment_data.append(cur_moment)\n print(\"Copied current moment to idx %s\" % (len(moment_data)-1))\n # toggle showing traj\n elif event.key == pygame.K_RETURN:\n if runtime_bucket[\"show_traj\"]:\n runtime_bucket[\"show_traj\"] = False\n else:\n runtime_bucket[\"show_traj\"] = True\n # toggle showing the 
current static actors\n elif event.key == pygame.K_SPACE:\n if runtime_bucket[\"playing_moment\"]:\n continue\n all_actors = list(runtime_bucket[\"static_peds\"].values()) + \\\n list(runtime_bucket[\"static_vehicles\"].values())\n\n if all_actors: # delete all static actors\n client.apply_batch(\n [carla.command.DestroyActor(x) for x in all_actors])\n runtime_bucket[\"static_peds\"] = {}\n runtime_bucket[\"static_vehicles\"] = {}\n # static_spawned is still true so no actor is generated\n else:\n runtime_bucket[\"static_spawned\"] = False\n\n # ---------------------- editing the moment\n # selecting actor\n elif (event.key == pygame.K_COMMA) or (event.key == pygame.K_PERIOD):\n get_idx = 1\n if event.key == pygame.K_COMMA:\n get_idx = -1\n select_new_actor(runtime_bucket, get_idx)\n\n # deleting an actor\n elif (event.key == pygame.K_BACKSPACE):\n if runtime_bucket[\"playing_moment\"]:\n continue\n all_actors = sorted(runtime_bucket[\"person_data\"].keys()) + \\\n sorted(runtime_bucket[\"vehicle_data\"].keys())\n if (len(runtime_bucket[\"person_data\"]) == 1) and \\\n not runtime_bucket[\"is_vehicle\"]:\n print(\"Cannot delete the last person.\")\n elif runtime_bucket[\"selected\"] is not None and \\\n (runtime_bucket[\"selected\"] in all_actors):\n delete_p_id = runtime_bucket[\"selected\"]\n delete_type = \"ped_controls\"\n if runtime_bucket[\"is_vehicle\"]:\n delete_type = \"vehicle_controls\"\n\n # modify the moment_data\n this_moment_data = moment_data[runtime_bucket[\"cur_moment_idx\"]]\n prev_controls = this_moment_data[delete_type]\n new_controls = {}\n for frame_id in prev_controls:\n temp = []\n for one in prev_controls[frame_id]:\n p_id, _, _, _, _, _, _ = one\n if p_id != delete_p_id:\n temp.append(one)\n new_controls[frame_id] = temp\n this_moment_data[delete_type] = new_controls\n # change the data directly\n moment_data[runtime_bucket[\"cur_moment_idx\"]] = this_moment_data\n\n # delete the x_agents\n if delete_p_id in \\\n moment_data[runtime_bucket[\"cur_moment_idx\"]][\"x_agents\"]:\n del moment_data[runtime_bucket[\"cur_moment_idx\"]][\"x_agents\"][\n delete_p_id]\n\n # delete the static guy\n check_static = runtime_bucket[\"static_peds\"]\n if runtime_bucket[\"is_vehicle\"]:\n check_static = runtime_bucket[\"static_vehicles\"]\n if delete_p_id in check_static:\n check_static[delete_p_id].destroy()\n del check_static[delete_p_id]\n\n # get the new traj data\n person_data, vehicle_data = get_trajs(\n moment_data, runtime_bucket[\"cur_moment_idx\"])\n runtime_bucket[\"person_data\"] = person_data\n runtime_bucket[\"vehicle_data\"] = vehicle_data\n\n # select a new guy\n select_new_actor(runtime_bucket, 1)\n\n # ----------------------------- editing the actor trajectory\n # toggle setting all points of the person/vehicle to is_stationary=True\n # except the first point.\n # this is useful for stationary person/vehicle in the scene\n elif (event.key == pygame.K_f) or (event.key == pygame.K_c):\n if runtime_bucket[\"playing_moment\"]:\n continue\n set_all_is_stationary_to = True\n if event.key == pygame.K_c:\n set_all_is_stationary_to = False\n # check selected or not\n all_actors = sorted(runtime_bucket[\"person_data\"].keys()) + \\\n sorted(runtime_bucket[\"vehicle_data\"].keys())\n if runtime_bucket[\"selected\"] is not None and \\\n (runtime_bucket[\"selected\"] in all_actors):\n p_id = runtime_bucket[\"selected\"]\n\n change_type = \"ped_controls\"\n if runtime_bucket[\"is_vehicle\"]:\n change_type = \"vehicle_controls\"\n\n prev_controls = \\\n 
moment_data[runtime_bucket[\"cur_moment_idx\"]][change_type]\n # not changing the stationary status of the first frame\n first_frame = sorted([int(fid) for fid in prev_controls])[0]\n for frame_id in prev_controls:\n if frame_id != (\"%s\" % first_frame): # json's fault\n for i, one in enumerate(prev_controls[frame_id]):\n if one[0] == p_id:\n one[-1] = set_all_is_stationary_to\n moment_data[runtime_bucket[\"cur_moment_idx\"]][change_type][\n frame_id][i] = one\n\n print(\"Set %s all traj's stationary to %s\" % (\n p_id, set_all_is_stationary_to))\n # get the new traj data\n person_data, vehicle_data = get_trajs(\n moment_data, runtime_bucket[\"cur_moment_idx\"])\n runtime_bucket[\"person_data\"] = person_data\n runtime_bucket[\"vehicle_data\"] = vehicle_data\n\n # press this then click to add new actor\n # toggle\n elif event.key == pygame.K_e:\n if runtime_bucket[\"waiting_for_click\"]:\n runtime_bucket[\"waiting_for_click\"] = False\n else:\n runtime_bucket[\"waiting_for_click\"] = True\n # press this then toggle new actor car or person or destination\n elif event.key == pygame.K_1:\n types = [\"person\", \"vehicle\", \"destination\"]\n cur_idx = types.index(runtime_bucket[\"new_actor_type\"])\n new_idx = cur_idx + 1\n if new_idx >= len(types):\n new_idx = 0\n runtime_bucket[\"new_actor_type\"] = types[new_idx]\n # delete the current selected actor's last traj\n elif event.key == pygame.K_q:\n if runtime_bucket[\"playing_moment\"]:\n continue\n all_actors = sorted(runtime_bucket[\"person_data\"].keys()) + \\\n sorted(runtime_bucket[\"vehicle_data\"].keys())\n if runtime_bucket[\"selected\"] is not None and \\\n (runtime_bucket[\"selected\"] in all_actors):\n delete_p_id = runtime_bucket[\"selected\"]\n\n if not runtime_bucket[\"is_vehicle\"]:\n traj_data = runtime_bucket[\"person_data\"][delete_p_id]\n else:\n traj_data = runtime_bucket[\"vehicle_data\"][delete_p_id]\n\n if len(traj_data) == 1:\n print(\"Cannot delete with only 1 timestep left.\")\n else:\n # change the actual control data and recompute everything\n delete_frame_id = \"%s\" % traj_data[-1][\"frame_id\"] # json's fault\n delete_type = \"ped_controls\"\n if runtime_bucket[\"is_vehicle\"]:\n delete_type = \"vehicle_controls\"\n # todo: need to set the new last timestep to be None\n\n # modify the moment_data\n prev_controls = \\\n moment_data[runtime_bucket[\"cur_moment_idx\"]][delete_type]\n delete_idx = None\n for i, one in enumerate(prev_controls[delete_frame_id]):\n p_id, _, _, _, _, _, _ = one\n if p_id == delete_p_id:\n delete_idx = i\n break\n if delete_idx is not None:\n # Yikes.\n del moment_data[\n runtime_bucket[\"cur_moment_idx\"]][delete_type][\n delete_frame_id][delete_idx]\n\n # get the new traj data\n person_data, vehicle_data = get_trajs(\n moment_data, runtime_bucket[\"cur_moment_idx\"])\n runtime_bucket[\"person_data\"] = person_data\n runtime_bucket[\"vehicle_data\"] = vehicle_data\n elif event.key == pygame.K_g:\n # play the moment\n # always set to true\n # until the moment finished will automatically be false\n runtime_bucket[\"playing_moment\"] = True\n\n # ------------- x agent setting\n\n # toggle setting the current agent as x agent\n elif event.key == pygame.K_x:\n if runtime_bucket[\"playing_moment\"]:\n continue\n # check selected or not\n all_actors = sorted(runtime_bucket[\"person_data\"].keys()) + \\\n sorted(runtime_bucket[\"vehicle_data\"].keys())\n if runtime_bucket[\"selected\"] is not None and \\\n (runtime_bucket[\"selected\"] in all_actors):\n\n p_id = runtime_bucket[\"selected\"]\n 
if runtime_bucket[\"is_vehicle\"]:\n print(\"Cannot set vehicle as x_agent for now.\")\n continue\n\n # unset x agent.\n if p_id in moment_data[runtime_bucket[\"cur_moment_idx\"]][\"x_agents\"]:\n print(\"Deleted person #%s as x_agent\" % p_id)\n del moment_data[runtime_bucket[\"cur_moment_idx\"]][\"x_agents\"][p_id]\n else:\n # set the actor as x agent\n # need to check whether the trajectory is full length\n frame_ids = [o[\"frame_id\"]\n for o in runtime_bucket[\"person_data\"][p_id]]\n if frame_ids[0] != 0:\n print(\"X_agent person needs to start from 0 frame\")\n continue\n if len(frame_ids) < args.obs_length + args.pred_length:\n print(\"X_agent traj length need to be %s\" % (\n args.obs_length + args.pred_length))\n continue\n print(\"Set person #%s as x_agent\" % p_id)\n moment_data[runtime_bucket[\"cur_moment_idx\"]][\"x_agents\"][p_id] = []\n # delete the last destination for this x_agent\n elif event.key == pygame.K_z:\n if runtime_bucket[\"playing_moment\"]:\n continue\n this_moment_data = moment_data[runtime_bucket[\"cur_moment_idx\"]]\n p_id = runtime_bucket[\"selected\"]\n\n if p_id in this_moment_data[\"x_agents\"]:\n if this_moment_data[\"x_agents\"][p_id]:\n print(\"Deleted person #%s destination.\" % p_id)\n del this_moment_data[\"x_agents\"][p_id][-1]\n\n # get a big dict of what key is pressed now, so to avoid hitting forward\n # multiple times to go forward for a distance\n step = 0.1 * ms_since_last_tick # this is from experimenting\n keys = pygame.key.get_pressed()\n if keys[pygame.K_w]:\n args_.spectator.set_location(prev_location + step * global_forward_vector)\n if keys[pygame.K_s]:\n args_.spectator.set_location(prev_location - step * global_forward_vector)\n if keys[pygame.K_a]:\n args_.spectator.set_location(prev_location + step * left_vector)\n if keys[pygame.K_d]:\n args_.spectator.set_location(prev_location - step * left_vector)\n if keys[pygame.K_u]:\n args_.spectator.set_location(prev_location + step * 0.5 * global_up_vector)\n if keys[pygame.K_i]:\n args_.spectator.set_location(prev_location - step * 0.5 * global_up_vector)\n if keys[pygame.K_UP]:\n args_.spectator.set_transform(carla.Transform(\n rotation=carla.Rotation(pitch=prev_rotation.pitch + 1.0,\n yaw=prev_rotation.yaw,\n roll=prev_rotation.roll),\n location=prev_location))\n if keys[pygame.K_DOWN]:\n args_.spectator.set_transform(carla.Transform(\n rotation=carla.Rotation(pitch=prev_rotation.pitch - 1.0,\n yaw=prev_rotation.yaw,\n roll=prev_rotation.roll),\n location=prev_location))\n if keys[pygame.K_LEFT]:\n args_.spectator.set_transform(carla.Transform(\n rotation=carla.Rotation(pitch=prev_rotation.pitch,\n yaw=prev_rotation.yaw - 1.0,\n roll=prev_rotation.roll),\n location=prev_location))\n if keys[pygame.K_RIGHT]:\n args_.spectator.set_transform(carla.Transform(\n rotation=carla.Rotation(pitch=prev_rotation.pitch,\n yaw=prev_rotation.yaw + 1.0,\n roll=prev_rotation.roll),\n location=prev_location))\n\n return False", "def main(): #Main Control Loop (as prototyped on 2/26 in Glennan Lounge)\n\t# Create listener to receive info from UI\n\tui_listener = pso_network.UIListener()\n\tui_listener.daemon = True\n\tui_listener.start()\n\tui_state = ui_listener.get_ui()\n\t\n\t# Create listener to recieve waypoints and corrections from planner.\n\tplanner_listener = pso_network.PlannerListener()\n\tplanner_listener.daemon = True\n\tplanner_listener.start()\n\twaypoint = cv.CreateMat(4, 1, cv.CV_32FC1)\n\t\n\t#Instatiate Drone Objects (defined in Drone.py)\n\tmyDrone = 
Drone(\"192.168.1.1\")\n\t\n\t\n\t#Preset flags\n\trunning = True\n\twait_on_emergency = False\n\twait_on_liftoff = False\n\twait_on_land = False\n\t\n\t#Create Kalman filter, state, and command vectors\n\tkalman = PsoKalman()\n\tu = cv.CreateMat(4, 1, cv.CV_32FC1)\n\tz = cv.CreateMat(5, 1, cv.CV_32FC1)\n\tsys_time = time.time()\n\t\n\t#Create PID controllers for each axis\n\tyaw_pid = pso_pid.PID()\n\tyaw_pid.k = 1.5\n\tyaw_pid.t_i = 1.\n\tyaw_pid.angular = True\n\tyaw_pid.deadband = .05\n\t\n\tz_pid = pso_pid.PID()\n\tz_pid.k = .00075\n\tz_pid.i_enable = False\n\tz_pid.t_i = 10.\n\tz_pid.deadband = 150\n\t\n\troll_pid = pso_pid.PID()\n\troll_pid.k = .00025\n\troll_pid.i_enable = False\n\troll_pid.deadband = 50\n\t\n\tpitch_pid = pso_pid.PID()\n\tpitch_pid.k = .00025\n\tpitch_pid.i_enable = False\n\tpitch_pid.deadband = 50\n\t\n\t#Logger puts state in csv for matlab-y goodness\n\tlogger = debuglogger.Logger()\n\t\n\t#Fig bucking loop\n\twhile(running):\n\t\ttime.sleep(.05)\n\t\tos.system(\"clear\")\n\t\t\n\t\t#Get command state from UI\n\t\tprev_ui_state = ui_state\n\t\tui_state = ui_listener.get_ui()\n\t\t\t\t\n\t\tif ui_state[EMERGENCY]:\n\t\t\tmyDrone.emergency()\n\t\t\n\t\tif ui_state[SHUTDOWN]:\n\t\t\t#UI has ordered shutdown\n\t\t\tprint \"Shutting down control loop...\"\n\t\t\tui_listener.stop()\n\t\t\tmyDrone.kill()\n\t\t\trunning = False\n\t\t\n\t\tif ui_state[TRIM]:\n\t\t\tmyDrone.trim()\n\t\t\tui_listener.clear_flag(TRIM)\n\t\t\tprint \"\\nTRIM\\n\"\n\t\t\n\t\tif ui_state[FLYING]:\n\t\t\tmyDrone.takeoff()\n\t\t\tprint \"Taking Off/Flying\"\n\t\t\tif not prev_ui_state[FLYING]:\n\t\t\t\twait_on_liftoff = 5\n\t\telse:\n\t\t\tmyDrone.land()\n\t\t\tprint \"Landing/Landed\"\n\t\t\tif prev_ui_state[FLYING]:\n\t\t\t\twait_on_land = 5\n\t\t\n\t\tif ui_state[RESET]:\n\t\t\tmyDrone.reset_emergency()\n\t\t\tmyDrone.reset()\n\t\t\tyaw_pid.flush()\n\t\t\tz_pid.flush()\n\t\t\troll_pid.flush()\n\t\t\tpitch_pid.flush()\n\t\t\tui_listener.clear_flag(RESET)\n\t\t\n\t\t#Get navdata\n\t\tnav = myDrone.get_navdata()\n\t\t\n\t\t#Print out Drone State\n\t\tif nav.check_state(navdata.EMERGENCY):\n\t\t\tprint \"Emergency!\"\n\t\telif not nav.check_state(navdata.COM_WATCHDOG):\n\t\t\tprint \"WATCHDOG\"\n\t\telif nav.check_state(navdata.COMMAND):\n\t\t\tprint \"Watchdog cleared. 
Not yet ready for commands.\"\n\t\telse:\n\t\t\tprint \"Ready to Fly\\n\"\n\t\tprint \"\\t\\tECACAVNAPCUWAPTHLGCMBNTTTCUACVVF\\n{0}\\t\\t{1:32b}\".format(nav.seq,nav.state) #Print navdata state\n\t\t\n\t\t#Update State (Kalman)\n\t\tdt = time.time()-sys_time\n\t\tprint \"dt:\\t\",dt\n\t\tsys_time = time.time()\n\t\tz[0, 0], z[1, 0], z[2, 0], z[3, 0], z[4, 0] = nav.vx, nav.vy, nav.z, nav.vz, nav.psi\n\t\t#z and u need to be cv matrices!!!!\n\t\tsys_state = myDrone.get_state()\n\t\tprint \"\\nDrone Kalman State:\"\n\t\tprint \"x:\\t{0}\".format(sys_state[0, 0])\n\t\tprint \"y:\\t{0}\".format(sys_state[2, 0])\n\t\tprint \"z:\\t{0}\".format(sys_state[4, 0])\n\t\tprint \"vx:\\t{0}\".format(sys_state[1, 0])\n\t\tprint \"vy:\\t{0}\".format(sys_state[3, 0])\n\t\tprint \"vz:\\t{0}\".format(sys_state[5, 0])\n\t\tprint \"theta:\\t{0}\".format(sys_state[6, 0])\n\t\tprint \"vtheta:\\t{0}\".format(sys_state[7, 0])\n\t\t\n\t\tprint \"\\nNavdata Euler Angles:\"\n\t\tprint \"theta:\\t\",nav.theta\n\t\tprint \"phi:\\t\",nav.phi\n\t\tprint \"psi:\\t\",nav.psi\n\t\tprint \"\\nNavdata Stuff:\"\n\t\tprint \"z:\\t\",nav.z\n\t\tprint \"vx:\\t\",nav.vx\n\t\tprint \"vy:\\t\",nav.vy\n\t\tui_listener.set_state(sys_state, nav)\n\t\t#logger.log(sys_state)\n\t\t\n\t\tif wait_on_liftoff>0:\n\t\t\tprint \"Waiting for liftoff to finish\"\n\t\t\twait_on_liftoff -= dt\n\t\t\tu[0, 0], u[1, 0], u[2, 0], u[3, 0] = 0, 0, 1, 0#Assume drone goes full speed up when taking off\n\t\telif ui_state[FLYING]:\n\t\t\tprint \"\" #Blank line to everything lines up\n\t\t\t#If Drone is in waypoint mode, compute command\n\t\t\tif not ui_state[OVERRIDE]:\n\t\t\t\t#Get waypoint\n\t\t\t\tif not planner_listener.waypoints.empty():\n\t\t\t\t\twaypoint = planner_listener.waypoints.get()\n\t\t\t\tprint \"\\nNext Waypoint:\"\n\t\t\t\tprint \"X:\\t\", waypoint[0, 0]\n\t\t\t\tprint \"Y:\\t\", waypoint[1, 0]\n\t\t\t\tprint \"Z:\\t\", waypoint[2, 0]\n\t\t\t\tprint \"θ:\\t\", waypoint[3, 0]\n\t\t\t\t#Compute command\n\t\t\t\t(roll_des, pitch_des) = world2drone(waypoint[0, 0]-sys_state[0, 0], waypoint[1, 0]-sys_state[2, 0], sys_state[6, 0])\n\t\t\t\tprint \"Desired Roll:\\t\", roll_des\n\t\t\t\tprint \"Desired Pitch:\\t\", pitch_des\n\t\t\t\tu[0, 0] = pitch_pid.update(0, pitch_des)\n\t\t\t\tu[1, 0] = roll_pid.update(0, roll_des)\n\t\t\t\tu[2, 0] = z_pid.update(sys_state[4, 0], waypoint[2, 0])\n\t\t\t\tu[3, 0] = yaw_pid.update(sys_state[6, 0], waypoint[3, 0])\n\t\t\t\tmyDrone.go(u[0, 0], u[1, 0], u[3, 0], u[2, 0])\n\t\t\telse: #Manual override: Use command from UI state\n\t\t\t\tprint \"\\nManual override mode\\n\\n\\n\"\n\t\t\t\tmyDrone.go(ui_state[COMMAND][0], ui_state[COMMAND][1], ui_state[COMMAND][2], ui_state[COMMAND][3])\n\t\t\t\tu[0, 0], u[1, 0], u[2, 0], u[3, 0] = ui_state[COMMAND]\n\t\telse:\n\t\t\tprint \"\\nLanded\"\n\t\t\n\t\t#Print out commands\n\t\tprint \"\\nCommands:\\npitch:\\t\",u[0, 0]\n\t\tprint \"roll:\\t\", u[1, 0]\n\t\tprint \"gaz:\\t\", u[2, 0]\n\t\tprint \"yaw:\\t\", u[3, 0]", "def ev_joyhatmotion(self, event: tcod.event.JoystickHat) -> T | None:", "def process_scan(self, msg):\n if len(msg.ranges) <= 330:\n # throw out scans that don't have more than 90% of the data\n return\n # get pose according to the odometry\n p = PoseStamped(header=Header(stamp=msg.header.stamp, frame_id=\"base_link\"), pose=Pose())\n self.odom_pose = self.tf_listener.transformPose(\"odom\", p)\n self.base_pose = self.tf_listener.transformPose(\"base_laser_link\", p)\n # convert the odom pose to the tuple (x,y,theta)\n self.odom_pose = 
OccupancyGridMapper.convert_pose_to_xy_and_theta(self.odom_pose.pose)\n #(-0.0069918, 0.000338577, 0.048387097)\n #(1.0208817, 0.04827240, 0.048387)\n self.base_pose = OccupancyGridMapper.convert_pose_to_xy_and_theta(self.base_pose.pose)\n for i in range(len(msg.ranges)):\n if 0.0 < msg.ranges[i] < 5.0: #for any reding within 5 meters\n #Using the pose and the measurement nd the angle, find it in the world\n map_x = self.odom_pose[0] + msg.ranges[i] * cos(i * pi / 180.0 + self.odom_pose[2])\n map_y = self.odom_pose[1] + msg.ranges[i] * -sin(i * pi / 180.0 + self.odom_pose[2])\n\n #Relate that map measure with a place in the picture\n x_detect = int((map_x - self.origin[0]) / self.resolution)\n y_detect = int((map_y - self.origin[1]) / self.resolution)\n\n\n #Determine how to mark the location in the map, along with the stuff inbetween\n u = (map_x - self.odom_pose[0], map_y - self.odom_pose[1])\n magnitude = sqrt(u[0] ** 2 + u[1] ** 2)\n n_steps = max([1, int(ceil(magnitude / self.resolution))])\n u_step = (u[0] / (n_steps - 1), u[1] / (n_steps - 1))\n marked = set()\n for i in range(n_steps):\n curr_x = self.odom_pose[0] + i * u_step[0]\n curr_y = self.odom_pose[1] + i * u_step[1]\n if not (self.is_in_map(curr_x, curr_y)):\n break\n\n x_ind = int((curr_x - self.origin[0]) / self.resolution)\n y_ind = int((curr_y - self.origin[1]) / self.resolution)\n if x_ind == x_detect and y_ind == y_detect:\n break\n if not ((x_ind, y_ind) in marked):\n # odds ratio is updated according to the inverse sensor model\n self.odds_ratios[x_ind, y_ind] *= self.p_occ / (1 - self.p_occ) * self.odds_ratio_miss\n marked.add((x_ind, y_ind))\n if self.is_in_map(map_x, map_y):\n # odds ratio is updated according to the inverse sensor model\n self.odds_ratios[x_detect, y_detect] *= self.p_occ / (1 - self.p_occ) * self.odds_ratio_hit\n\n self.seq += 1\n # to save time, only publish the map every 10 scans that we process\n if self.seq % 10 == 0:\n # make occupancy grid\n map = OccupancyGrid()\n map.header.seq = self.seq\n self.seq += 1\n map.header.stamp = msg.header.stamp\n map.header.frame_id = \"map\" # the name of the coordinate frame of the map\n map.info.origin.position.x = self.origin[0]\n map.info.origin.position.y = self.origin[1]\n map.info.width = self.n\n map.info.height = self.n\n map.info.resolution = self.resolution\n map.data = [0] * self.n ** 2 # map.data stores the n by n grid in row-major order\n for i in range(self.n):\n for j in range(self.n):\n idx = i + self.n * j # this implements row major order\n if self.odds_ratios[i, j] < 1 / 5.0: # consider a cell free if odds ratio is low enough\n map.data[idx] = 0\n elif self.odds_ratios[i, j] > 5.0: # consider a cell occupied if odds ratio is high enough\n map.data[idx] = 100\n else: # otherwise cell is unknown\n map.data[idx] = -1\n self.pub.publish(map)\n\n # create the image from the probabilities so we can visualize using opencv\n im = np.zeros((self.odds_ratios.shape[0], self.odds_ratios.shape[1], 3))\n for i in range(im.shape[0]):\n for j in range(im.shape[1]):\n if self.odds_ratios[i, j] < 1 / 5.0:\n im[i, j, :] = 1.0\n elif self.odds_ratios[i, j] > 5.0:\n im[i, j, :] = 0.0\n else:\n im[i, j, :] = 0.5\n\n # compute the index of the odometry pose so we can mark it with a circle\n x_odom_index = int((self.odom_pose[0] - self.origin[0]) / self.resolution)\n y_odom_index = int((self.odom_pose[1] - self.origin[1]) / self.resolution)\n\n x_base_index = int((self.base_pose[0] - self.origin[0] - 1) / self.resolution)\n y_base_index = 
int((self.base_pose[1] - self.origin[1]) / self.resolution)\n\n\n # computer the ball locations so we can mark with a colored circle\n #TODO Track and relate the robot's angle pose for accuracy\n\n if self.depth_red > 0:\n self.y_camera_red = int(x_odom_index - self.depth_red * cos(self.angle_diff_red + pi - self.odom_pose[2])/self.resolution)\n self.x_camera_red = int(y_odom_index - self.depth_red * sin(self.angle_diff_red + pi - self.odom_pose[2])/self.resolution)\n cv2.circle(im, (self.x_camera_red, self.y_camera_red), 1, self.red)\n\n real_red_y = self.depth_red * cos(self.angle_diff_red + pi - self.odom_pose[2])\n real_red_x = self.depth_red * sin(self.angle_diff_red + pi - self.odom_pose[2])\n\n self.rcoor_pub.publish(Vector3(-real_red_x, -real_red_y/2, 0))\n else:\n cv2.circle(im, (self.x_camera_red, self.y_camera_red), 1, self.red)\n\n if self.depth_blue > 0:\n self.y_camera_blue = int(x_odom_index - self.depth_blue * cos(self.angle_diff_blue + pi - self.odom_pose[2])/self.resolution)\n self.x_camera_blue = int(y_odom_index - self.depth_blue * sin(self.angle_diff_blue + pi - self.odom_pose[2])/self.resolution)\n cv2.circle(im, (self.x_camera_blue, self.y_camera_blue), 1, self.blue)\n\n real_blue_y = self.depth_blue * cos(self.angle_diff_blue + pi - self.odom_pose[2])\n real_blue_x = self.depth_blue * sin(self.angle_diff_blue + pi - self.odom_pose[2])\n\n self.bcoor_pub.publish(Vector3(-real_blue_x, -real_blue_y/2, 0))\n else:\n cv2.circle(im, (self.x_camera_blue, self.y_camera_blue), 1, self.blue)\n\n if self.depth_green > 0:\n self.y_camera_green = int(x_odom_index - self.depth_green * cos(self.angle_diff_green + pi - self.odom_pose[2])/self.resolution)\n self.x_camera_green = int(y_odom_index - self.depth_green * sin(self.angle_diff_green + pi - self.odom_pose[2])/self.resolution)\n cv2.circle(im, (self.x_camera_green, self.y_camera_green), 1, self.green)\n \n real_green_y = self.depth_green * cos(self.angle_diff_green + pi - self.odom_pose[2])\n real_green_x = self.depth_green * sin(self.angle_diff_green + pi - self.odom_pose[2])\n\n self.gcoor_pub.publish(Vector3(-real_green_x, -real_green_y/2, 0))\n\n if self.depth_yellow > 0:\n self.y_camera_yellow = int(x_odom_index - self.depth_yellow * cos(self.angle_diff_yellow + pi - self.odom_pose[2])/self.resolution)\n self.x_camera_yellow = int(y_odom_index - self.depth_yellow * sin(self.angle_diff_yellow + pi - self.odom_pose[2])/self.resolution)\n cv2.circle(im, (self.x_camera_yellow, self.y_camera_yellow), 1, self.yellow)\n \n real_yellow_y = self.depth_yellow * cos(self.angle_diff_yellow + pi - self.odom_pose[2])\n real_yellow_x = self.depth_yellow * sin(self.angle_diff_yellow + pi - self.odom_pose[2])\n\n self.ycoor_pub.publish(Vector3(-real_yellow_x, -real_yellow_y/2, 0))\n else:\n cv2.circle(im, (self.x_camera_yellow, self.y_camera_yellow), 1, self.yellow)\n\n # draw the robot\n cv2.circle(im, (y_odom_index, x_odom_index), 2, (255, 0, 0))\n \n # display the image resized\n cv2.imshow(\"map\", cv2.resize(im, (500, 500)))\n cv2.waitKey(20)", "def main():\n # Placing imports here so it will be imported only if user want to test algorithm, not when importing\n # Class DepthCameraServer\n\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n import sensors_classes as sensors\n from images_processing_class import ImagesProcessing\n import struct\n import time\n\n # Starting Thread which receives data from VideoCamera, port od thread's socket must be the same as the port at\n # which data from VideoCamera is 
redirected, to be sure check where VideoCamera data stream is send in script env.py\n depth_camera_server = DepthCameraServer('localhost', 60012)\n depth_camera_server.run()\n\n pose_server = sensors.Pose_server('localhost', 60007)\n pose_server.run()\n\n # Waiting 1 sec to be sure than depth_camera_server has received minimum 1 image, because program will crash if\n # depth_camera_server doesn't have time to receive an image\n time.sleep(1)\n\n points = depth_camera_server.get_points()\n\n lista_punktow = []\n x = []\n y = []\n z = []\n\n data_pose_dict = pose_server.get_all()\n pose_x = data_pose_dict['x']\n pose_y = data_pose_dict['y']\n pose_z = data_pose_dict['z']\n\n yawp = data_pose_dict['yaw']\n pitchp = data_pose_dict['pitch']\n rollp = data_pose_dict['roll']\n\n # Each 3D point is a set of float(x,y,z). Each point has a size of 12 bytes because\n # 3*sizeof(float) = 12 bytes, that's why we are dividing data into parts with size of 12 and then\n # converting this data to tuple with 3 float (xyz).\n\n #\n # Processing cloud of points to seperate x, y and z was copied from dcam_old.py\n #\n\n for i in range(0, len(points) - 12, 12):\n xyz = struct.unpack('fff', points[i:i + 12])\n\n # rotation is included\n x1p, y1p, z1p = rotation(xyz[2], xyz[0], xyz[1], yawp, pitchp, rollp)\n\n # data from pose is included\n xp = round(x1p + pose_x, 1)\n yp = round(y1p + pose_y, 1)\n zp = round(z1p + pose_z, 1)\n temp = [xp, yp, zp]\n lista_punktow.append(temp)\n\n # Choosing only these points which have minimum 0.45 meters at z-axis, but why???\n for i in lista_punktow:\n x.append(i[0])\n y.append(i[1])\n z.append(i[2])\n\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.scatter(x, y, z, cmap='viridis', linewidth=0.5)\n ax.scatter(x[0], y[0], z[0], c='red')\n ax.scatter(x[1], y[1], z[1], c='yellow')\n ax.scatter(x[2], y[2], z[2], c='black')\n ax.scatter(pose_x, pose_y, pose_z, c='green')\n plt.show()", "def __init__(self, name, freq, waypoint_specified, waypoint_bc):\n self.dt = 1.0/freq\n self.uav = name\n self.wp_specfied = waypoint_specified\n self.wp_bc = waypoint_bc\n self.start_time = time.time()\n self.average_speed = 3.0\n self.reduced_speed = 0.5\n # specify start/intermediate/end points and its deviratives \n self.no_of_segments = 7\n self.wp_callback_counter = 0\n self.trajectory_constructed = False\n\n self.r = 4 # corresponding to snap which is 4th derivative\n self.N = 7# degree of polynomial \n \n self.pub = rospy.Publisher('/'+self.uav+'/PolynomialTrajectory', PolynomialTrajectory, queue_size = 1, tcp_nodelay = True)\n \n rospy.Subscriber('/'+self.uav + '/odometry_sensor1/odometry', Odometry, self.currentstate_callback, queue_size = 1, tcp_nodelay = True) \n rospy.Subscriber('/'+self.uav+'/waypoint_publisher', Pose, self.waypoint_callback, queue_size = 1, tcp_nodelay=True)\n\n #try: \n # rospy.Subscriber('/'+self.uav+'/waypoint_publisher', Pose, self.waypoint_callback, queue_size = 1, tcp_nodelay=True)\n # rospy.Subscriber('/'+self.uav + '/odometry_sensor/odometry', Odometry, self.currentstate_callback, queue_size = 1, tcp_nodelay = True)\n #except: \n # print 'Either waypoints or odometry is not available.'", "def process_movement(self, steering, distance, motion_noise):\n \n self.bearing = truncate_angle(self.bearing + float(steering))\n # print self.Robot.find_next_point(steering, distance)\n\n print \"motion noise \", motion_noise\n\n self.x = self.x + (distance * math.cos(self.bearing))\n self.y = self.y + (distance * math.sin(self.bearing))\n # print \"x and y\", self.x, self.y, 
self.bearing\n # TODO\n # raise NotImplementedError\n\n x = 0.0\n y = 0.0\n\n return self.x, self.y", "def wemo_process(self, msg):\n if msg[\"content\"][\"command\"] == \"nickname\":\n # print msg\n self.nickname = msg[\"content\"][\"value\"]\n self.controller.sending(\n {\"subject\": \"control\" + \".\" + self.controller.type,\n \"content_type\": \"request\",\n \"content\": {\"request\": \"nickname\",\n \"target\": self.controller.type + \".\" + self.name,\n #\"token\": self.controller.target,\n \"value\": {\"name\": self.name, \"nickname\": msg[\"content\"][\"value\"]}}})\n elif msg[\"content\"][\"command\"] == \"status\":\n # Not gone the way of the dodo\n # try:\n self.controller.sending({\"subject\": self.controller.type,\n \"content_type\": \"event\",\n \"content\": {\"event\": \"status\",\n \"target\": self.controller.type +\n \".\" +\n self.name,\n \"icon status\":\n {\"bu-radar1\": {\"fill\":\"black\", \"opacity\":\"1\"},\n \"bu-radar2\": {\"fill\":cssColour(), \"opacity\":\"0\"},\n \"bu-not-present\": {\n \"opacity\": 0}},\n \"value\": {}}})\n # except: #Most probably is known but we lost pairing\n # pass\n\n\n return None", "def process_current_time(self):\n if self.new_input:\n cuda.Context.synchronize()\n self.activate()\n self.new_input = False\n if self.plastic:\n self.learn()", "def motion_f(input_line, cur, count):\n return start_catching_keys(1, \"cb_motion_f\", input_line, cur, count)", "def track(self, image):\n\n keyframe_image = self.images[self.keyframe_inds[-1]]\n images = np.stack([keyframe_image, image], axis=0)\n\n keyframe_pose = self.poses[self.keyframe_inds[-1]]\n poses = np.stack([keyframe_pose, self.pose_cur], axis=0)\n\n keyframe_depth = self.depths[self.keyframe_inds[-1]]\n depths = keyframe_depth[np.newaxis]\n\n edges = np.array([[0,1]], dtype=np.int32)\n fixed = np.int32(0)\n\n feed_dict = {\n self.images_placeholder: images,\n self.depths_placeholder: depths,\n self.poses_placeholder: poses,\n self.edges_placeholder: edges,\n self.fixed_placeholder: fixed,\n self.init_placeholder: False,\n self.intrinsics_placeholder: self.intrinsics}\n\n updated_poses = self.sess.run(self.outputs['poses'], feed_dict=feed_dict)\n\n # relative pose between keyframe and new pose\n dP = np.matmul(updated_poses[1], np.linalg.inv(updated_poses[0])) \n self.pose_cur = np.matmul(dP, keyframe_pose)\n\n return pose_distance(dP)", "def odom_update(self, data):\n self, curr_pos = (data.pose.pose.position.x, data.pose.pose.position.y)", "def player_movement(self):", "def process_event(event, assistant):\n if event.type == EventType.ON_CONVERSATION_TURN_STARTED:\n print()\n # print('atencion')\n print(event)\n # print('atecion fin')\n\n if (event.type == EventType.ON_CONVERSATION_TURN_FINISHED and\n event.args and not event.args['with_follow_on_turn']):\n print()\n if event.type == EventType.ON_DEVICE_ACTION:\n for command, params in event.actions:\n print('Do command', command, 'with params', str(params))\n\n if event.type == EventType.ON_DEVICE_ACTION:\n for command, params in event.actions:\n print('Do command', command, 'with params', str(params))\n\n # Add the following lines after the existing line above:\n\n if command == \"com.example.actions.fotografia\":\n print('comando')\n # if event.type == EventType.ON_RECOGNIZING_SPEECH_FINISHED:\n # print(event.args['text'])\n # if event.args['text'] in local_commands:\n if event.type == EventType.ON_RENDER_RESPONSE:\n if event.args['text'] in 'Estoy procesando tu imagen, amuleto que dices?':\n assistant.stop_conversation()\n 
print('exito')\n\n imgname = ''\n with picamera.PiCamera() as camera:\n imgname = uuid.uuid4().hex.upper()[0:6]\n camera.start_preview()\n time.sleep(5)\n camera.capture('/home/pi/Desktop/' + imgname + '.jpg')\n camera.stop_preview()\n\n image = tf.keras.preprocessing.image.img_to_array(\n tf.keras.preprocessing.image.load_img('/home/pi/Desktop/' + imgname + '.jpg',\n target_size=(128, 128))) / 255.\n\n payload = {\n \"instances\": [{input_name: image.tolist()}]\n }\n r = requests.post('http://' + host + '/v1/models/' + model_name + ':predict', json=payload)\n print(r)\n\n flowers = np.array(['margarita', 'diente de leon', 'rosa', 'girasol', 'tulipan'], dtype=np.object)\n try:\n info = json.loads(r.content.decode('utf-8'))\n print(r.content.decode('utf-8'))\n for url in info[\"predictions\"]:\n cap = (np.array(url) > 0.6).astype(np.bool)\n print(cap)\n cap2 = flowers[cap][0]\n\n synthesis_input = texttospeech.types.SynthesisInput(\n text=\"Estoy contemplando una muy linda flor, es mas problable que sea \" + cap2)\n\n voice = texttospeech.types.VoiceSelectionParams(\n language_code='es-ES',\n ssml_gender=texttospeech.enums.SsmlVoiceGender.NEUTRAL)\n\n audio_config = texttospeech.types.AudioConfig(\n audio_encoding=texttospeech.enums.AudioEncoding.MP3)\n\n response = client.synthesize_speech(synthesis_input, voice, audio_config)\n\n with open(imgname + '.mp3', 'wb') as out:\n out.write(response.audio_content)\n print('Audio content written to file \"output.mp3\"')\n\n os.system('mpg321 -g 20 ' + imgname + '.mp3 &')\n except Exception as e:\n synthesis_input = texttospeech.types.SynthesisInput(text=\"No estoy seguro de lo que me muestras\")\n voice = texttospeech.types.VoiceSelectionParams(\n language_code='es-ES',\n ssml_gender=texttospeech.enums.SsmlVoiceGender.NEUTRAL)\n\n audio_config = texttospeech.types.AudioConfig(\n audio_encoding=texttospeech.enums.AudioEncoding.MP3)\n\n response = client.synthesize_speech(synthesis_input, voice, audio_config)\n\n # The response's audio_content is binary.\n with open(imgname + '.mp3', 'wb') as out:\n out.write(response.audio_content)\n\n os.system('mpg321 -g 20 ' + imgname + '.mp3 &')\n print(\"type error: \" + str(e))" ]
[ "0.5876557", "0.5821907", "0.5732931", "0.56815475", "0.56735855", "0.56616944", "0.5590481", "0.5567074", "0.54753345", "0.5468342", "0.54676706", "0.53700924", "0.5357709", "0.5344554", "0.53419507", "0.53375906", "0.5330831", "0.53234816", "0.5321119", "0.5305486", "0.53044", "0.5289346", "0.52744627", "0.52706796", "0.524685", "0.52395785", "0.5229326", "0.52236766", "0.52113235", "0.5195841", "0.51878816", "0.51865137", "0.51817024", "0.5144472", "0.51440936", "0.5143034", "0.5130936", "0.51302624", "0.5124791", "0.51202315", "0.51153356", "0.51108027", "0.5108517", "0.51036966", "0.5091151", "0.5089718", "0.50844085", "0.50844085", "0.50844085", "0.50820714", "0.50691706", "0.50675637", "0.5066697", "0.5065444", "0.5057285", "0.505594", "0.50406265", "0.50369245", "0.50240314", "0.50226486", "0.5021721", "0.5018371", "0.5015292", "0.5012819", "0.5008005", "0.500284", "0.4992432", "0.49898264", "0.49767062", "0.49762285", "0.4972195", "0.49675518", "0.49664727", "0.4962803", "0.4961997", "0.49612108", "0.49599075", "0.49589497", "0.49583215", "0.49577978", "0.49532223", "0.49490547", "0.4944066", "0.4941179", "0.49373302", "0.49354228", "0.4927151", "0.49252996", "0.4923365", "0.4918306", "0.49157372", "0.4915671", "0.49143937", "0.49114335", "0.49107295", "0.49088085", "0.49060616", "0.49044168", "0.4892031", "0.4888367" ]
0.5297739
21
Returns a point cloud
def triangulation(self, kp_a, kp_b, cam_a, cam_b):
    reproj_error = []
    point_cloud = []
    for i in range(len(kp_a)):
        # convert to normalized homogeneous coordinates
        kp = kp_a[i]
        u = np.array([kp[0], kp[1], 1.0])
        mat_um = self.k_inv * np.matrix(u).T
        u = np.array(mat_um[:, 0])

        kp_ = kp_b[i]
        u_ = np.array([kp_[0], kp_[1], 1.0])
        mat_um_ = self.k_inv * np.matrix(u_).T
        u_ = np.array(mat_um_[:, 0])

        # now we triangulate!
        x = self.linear_ls_triangulation(u, cam_a, u_, cam_b)
        point_cloud.append(x.flatten())

        # calculate reprojection error
        # reproject to other img
        x_for_camera = np.matrix(np.append(x, [[1.0]], axis=0))
        x_pt_img = np.array(self.k_mat * cam_b * x_for_camera).flatten()
        x_pt_img_ = np.array([x_pt_img[0] / x_pt_img[2],
                              x_pt_img[1] / x_pt_img[2]])

        # check error in matched keypoint
        reproj_error.append(np.linalg.norm(x_pt_img_ - kp_))

    return reproj_error, point_cloud
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def point_cloud(self):\n\t\tgen = self.loop(point_cloud=True)\n\t\tpoint_cloud = next(gen)\n\t\treturn point_cloud", "def convertcloud(points):\n pcd = open3d.geometry.PointCloud()\n pcd.points = open3d.utility.Vector3dVector(points)\n return pcd", "def as_point_cloud(self):\n far = 1000.0 # max depth in meters.\n intrinsic_mat = self.camera_setup.get_intrinsic_matrix()\n width, height = self.camera_setup.width, self.camera_setup.height\n # 2d pixel coordinates\n pixel_length = width * height\n u_coord = repmat(np.r_[0:width:1], height, 1).reshape(pixel_length)\n v_coord = repmat(np.c_[0:height:1], 1, width).reshape(pixel_length)\n normalized_depth = np.reshape(self.frame, pixel_length)\n\n # p2d = [u,v,1]\n p2d = np.array([u_coord, v_coord, np.ones_like(u_coord)])\n\n # P = [X,Y,Z]\n p3d = np.dot(inv(intrinsic_mat), p2d)\n p3d *= normalized_depth * far\n\n # [[X1,Y1,Z1],[X2,Y2,Z2], ... [Xn,Yn,Zn]]\n locations = np.asarray(np.transpose(p3d))\n # Transform the points in 3D world coordinates.\n to_world_transform = self.camera_setup.get_unreal_transform()\n point_cloud = to_world_transform.transform_points(locations)\n return point_cloud", "def get_point_cloud(self):\n tsdf_vol, color_vol = self.get_volume()\n\n # Marching cubes\n verts = measure.marching_cubes_lewiner(tsdf_vol, level=0)[0]\n verts_ind = np.round(verts).astype(int)\n verts = verts * self._voxel_size + self._vol_origin\n\n # Get vertex colors\n rgb_vals = color_vol[verts_ind[:, 0], verts_ind[:, 1], verts_ind[:, 2]]\n colors_b = np.floor(rgb_vals / self._color_const)\n colors_g = np.floor((rgb_vals - colors_b * self._color_const) / 256)\n colors_r = rgb_vals - colors_b * self._color_const - colors_g * 256\n colors = np.floor(np.asarray([colors_r, colors_g, colors_b])).T\n colors = colors.astype(np.uint8)\n\n pc = np.hstack([verts, colors])\n return pc", "def create_point_cloud(self):\n pixels = []\n colors = []\n my_pixels = []\n for j in range(self.height):\n for i in range(self.width):\n depth = self.depth[j, i]\n pixels.append(\n [i * depth, j * depth, depth]\n )\n my_pixels.append(\n [i, j, 1]\n )\n # make rgb with flip()\n colors.append(np.flip(self.bgr[j, i, :]))\n # colors.append(self.bgr[j, i, :])\n self.my_pixels = my_pixels\n pixels = np.array(pixels)\n\n # project pixels to camera space\n self.xyz_points = self.intrinsics_inv @ np.transpose(pixels)\n self.color_points = colors\n\n # now add 1s to the points for homogenous coordinates\n num_points = self.get_num_xyz_points()\n ones = np.ones((1, num_points))\n self.xyzw_points = np.concatenate((self.xyz_points, ones), axis=0)\n\n self.scene = None\n self.camera_pose = None\n self.nm = None\n self.nl = None\n self.nc = None\n self.create_mesh()", "def create_pointcloud(pts):\n depths = PointCloud()\n depths.header = std_msgs.msg.Header()\n depths.header.stamp = rospy.Time.now()\n depths.header.frame_id = \"view_zero\"\n depths.points = [None] * len(pts)\n for p in xrange(len(pts)):\n #Giving point the same orientation as the robot\n y = pts[p,0]\n z = - pts[p,1] #y in images is down\n x = pts[p,2]\n depths.points[p] = Point(x, y, z)\n return depths", "def make_point_cloud(self):\r\n\r\n self.pointCloud = VtkPointCloud()\r\n for k in range(np.size(self.pos, 0)):\r\n self.pointCloud.addPoint(self.pos[k, :])\r\n\r\n # Renderer\r\n renderer = vtk.vtkRenderer()\r\n renderer.AddActor(self.pointCloud.vtkActor)\r\n renderer.SetBackground(.2, .3, .4)\r\n renderer.SetBackground(0.0, 0.0, 0.0)\r\n renderer.ResetCamera()\r\n\r\n # Render Window\r\n renderWindow = 
vtk.vtkRenderWindow()\r\n renderWindow.AddRenderer(renderer)\r\n\r\n # Interactor\r\n renderWindowInteractor = vtk.vtkRenderWindowInteractor()\r\n renderWindowInteractor.SetRenderWindow(renderWindow)\r\n\r\n # Begin Interaction\r\n renderWindow.Render()\r\n renderWindow.SetWindowName(\"XYZ Data Viewer: \")\r\n renderWindowInteractor.Start()", "def draw_pointcloud(ax, example):\n points = example['points'].cpu().detach().numpy()\n points_num = len(points)\n xs = np.empty([points_num])\n ys = np.empty([points_num])\n zs = np.empty([points_num])\n intensity = np.empty([len(points)])\n for j, point in enumerate(points):\n xs[j] = point[1]\n ys[j] = point[2]\n zs[j] = point[3]\n intensity[j] = point[4]\n\n intensity = intensity\n ax.scatter3D(xs, ys, zs, c=intensity, marker='.', s=0.3, cmap=plt.get_cmap('jet'))", "def point_cloud(self, X, Y, Z, size=1, color='#FF3232', bordercolor='#FF3232', legend='', width=0.5, opacity=1.0):\n point_cloud = go.Scatter3d(\n x=X,\n y=Y,\n z=Z,\n # showlegend=False,\n name=legend,\n mode='markers',\n marker=dict(\n size=size,\n color=color,\n line=dict(\n color=bordercolor,\n width=width\n ),\n # opacity=opacity\n )\n )\n\n return point_cloud", "def voxelize(points,leaf = 0.1):\n if (type(points) == pclpy.pcl.PointCloud.PointXYZRGB):\n cloud = points\n voxel_filter = pclpy.pcl.filters.VoxelGrid.PointXYZRGB()\n filtered_pointcloud = pclpy.pcl.PointCloud.PointXYZRGB()\n else:\n cloud = pclpy.pcl.PointCloud.PointXYZ(points)\n voxel_filter = pclpy.pcl.filters.VoxelGrid.PointXYZ()\n filtered_pointcloud = pclpy.pcl.PointCloud.PointXYZ()\n \n voxel_filter.setLeafSize(leaf,leaf,leaf)\n voxel_filter.setInputCloud(cloud)\n \n voxel_filter.filter(filtered_pointcloud)\n if type(points) == pclpy.pcl.PointCloud.PointXYZRGB:\n return filtered_pointcloud\n else:\n return filtered_pointcloud.xyz", "def get_cloud(self):\n self.has_cloud = False\n while not self.has_cloud:\n rospy.sleep(0.01)\n\n # cloud_time = self.active_cloud_msg.header.stamp\n # cloud_frame = self.active_cloud_msg.header.frame_id\n cloud = np.array(list(point_cloud2.read_points(self.active_cloud_msg)))[:, 0:3]\n mask = np.logical_not(np.isnan(cloud).any(axis=1))\n cloud = cloud[mask]\n\n print 'received cloud with {} points.'.format(cloud.shape[0])\n return cloud", "def format_point_cloud(self, pc, num_points):\n resized_pc = np.zeros([num_points, 3])\n if len(pc) > num_points:\n # Randomly sample point cloud to reduce size\n resized_pc = pc[np.random.choice(pc.shape[0], num_points,\n replace=False), :]\n elif len(pc) < num_points:\n # Duplicate last point to fill point cloud. 
Because of the max function\n # the duplicated points will not affect the output\n resized_pc[:len(pc)] = pc\n resized_pc[len(pc):] = pc[-1]\n else:\n resized_pc = pc\n\n return resized_pc", "def test_get_mesh_grid_as_point_cloud_single_pt() -> None:\n min_x = -3 # integer, minimum x-coordinate of 2D grid\n max_x = -3 # integer, maximum x-coordinate of 2D grid\n min_y = 2 # integer, minimum y-coordinate of 2D grid\n max_y = 2 # integer, maximum y-coordinate of 2D grid\n\n # return pts, a Numpy array of shape (N,2)\n pts = mesh_grid_utils.get_mesh_grid_as_point_cloud(\n min_x, max_x, min_y, max_y, downsample_factor=1.0\n )\n\n assert pts.shape == (1, 2)\n gt_pts: NDArrayFloat = np.array([[-3.0, 2.0]])\n\n assert np.allclose(gt_pts, pts)", "def geometries(self) -> List[geometry.Geometry]:\n geometries = []\n for dimension, cost_fn in itertools.zip_longest(\n range(self.grid_dimension), self.cost_fns, fillvalue=self.cost_fns[-1]\n ):\n x_values = self.x[dimension][:, jnp.newaxis]\n geom = pointcloud.PointCloud(\n x_values,\n cost_fn=cost_fn,\n epsilon=self._epsilon_init,\n )\n geometries.append(geom)\n return geometries", "def generate_pointcloud(rgb_file, mask_file,depth_file,ply_file):\n rgb = Image.open(rgb_file)\n # depth = Image.open(depth_file)\n depth = Image.open(depth_file).convert('I')\n mask = Image.open(mask_file).convert('I')\n\n # if rgb.size != depth.size:\n # raise Exception(\"Color and depth image do not have the same resolution.\")\n # if rgb.mode != \"RGB\":\n # raise Exception(\"Color image is not in RGB format\")\n # if depth.mode != \"I\":\n # raise Exception(\"Depth image is not in intensity format\")\n\n\n points = [] \n for v in range(rgb.size[1]):\n for u in range(rgb.size[0]):\n color = rgb.getpixel((u,v))\n # Z = depth.getpixel((u,v)) / scalingFactor\n # if Z==0: continue\n # X = (u - centerX) * Z / focalLength\n # Y = (v - centerY) * Z / focalLength\n if (mask.getpixel((u,v))<55):\n Z = depth.getpixel((u, v))*.22 \n if Z == 0: continue\n Y = .22 * v\n X = .22 * u\n points.append(\"%f %f %f %d %d %d 0\\n\"%(X,Y,Z,color[0],color[1],color[2]))\n file = open(ply_file,\"w\")\n file.write('''ply\nformat ascii 1.0\nelement vertex %d\nproperty float x\nproperty float y\nproperty float z\nproperty uchar red\nproperty uchar green\nproperty uchar blue\nproperty uchar alpha\nend_header\n%s\n'''%(len(points),\"\".join(points)))\n file.close()", "def visualize_point_cloud(points, colors=None, normals=None,\n show_frame=False, frame_size=1.0, frame_origin=(0, 0, 0)):\n pc = np2pcd(points, colors, normals)\n geometries = [pc]\n if show_frame:\n coord_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=frame_size, origin=frame_origin)\n geometries.append(coord_frame)\n o3d.visualization.draw_geometries(geometries)", "def publish_point_cloud(self):\n all_points = [np.zeros((0, 2), np.float32)]\n all_keys = []\n for key in range(len(self.keyframes)):\n pose = self.keyframes[key].pose\n transf_points = self.keyframes[key].transf_points\n all_points.append(transf_points)\n all_keys.append(key * np.ones((len(transf_points), 1)))\n\n all_points = np.concatenate(all_points)\n all_keys = np.concatenate(all_keys)\n sampled_points, sampled_keys = pcl.downsample(\n all_points, all_keys, self.point_resolution\n )\n sampled_xyzi = np.c_[sampled_points, np.zeros_like(sampled_keys), sampled_keys]\n if len(sampled_xyzi) == 0:\n return\n\n if self.save_fig:\n plt.figure()\n plt.scatter(\n sampled_xyzi[:, 0], sampled_xyzi[:, 1], c=sampled_xyzi[:, 3], s=1\n )\n plt.axis(\"equal\")\n 
plt.gca().invert_yaxis()\n plt.savefig(\"step-{}-map.png\".format(self.current_key - 1), dpi=100)\n plt.close(\"all\")\n\n cloud_msg = n2r(sampled_xyzi, \"PointCloudXYZI\")\n cloud_msg.header.stamp = self.current_keyframe.time\n cloud_msg.header.frame_id = \"map\"\n self.cloud_pub.publish(cloud_msg)", "def _project_pointcloud(self, cloud):\n\n assert isinstance(cloud, PointCloud2)\n\n pc1 = PointCloud()\n pc1.header = cloud.header\n # hack the time! dont move the robot :-0\n pc1.header.stamp = rospy.Time.now()\n \n \n pc1.points = [Point32(*p) for p in pc2.read_points(cloud)]\n\n self._tf_listener.waitForTransform(pc1.header.frame_id,\n self._image_info.tf_frame, \n rospy.Time(0), \n rospy.Duration(4))\n\n image_frame_cloud = self._tf_listener.transformPointCloud (\n self._image_info.tf_frame, \n pc1)\n min_x, max_x, min_y, max_y = 640, 0, 480, 0 # TODO: remove hard coded image size!\n for pt in image_frame_cloud.points:\n u, v = self._image_info.project3dToPixel((pt.x, pt.y, pt.z))\n if v < min_y:\n min_y = int(v)\n if v > max_y:\n max_y = int(v)\n if u < min_x:\n min_x = int(u)\n if u > max_x:\n max_x = int(u)\n location = (((min_x, min_y), (max_x, max_y)))\n rospy.loginfo(\"Transformed cloud into image plane\")\n return location", "def generate_point_cloud(n:int, d:int = 2, seed=1234) -> np.ndarray:\n initial_seed = np.random.get_state()\n np.random.seed(seed)\n points = np.random.rand(n, d)\n np.random.set_state(initial_seed)\n return points", "def create_cloud(header, fields, points):\n\t\n\tcloud_struct = struct.Struct(_get_struct_fmt(False, fields))\n\t\n\tbuff = ctypes.create_string_buffer(cloud_struct.size * len(points))\n\t\n\tpoint_step, pack_into = cloud_struct.size, cloud_struct.pack_into\n\toffset = 0\n\t\n\tfor p in points:\n\t\tpack_into(buff, offset, *p)\n\t\toffset += point_step\n\treturn PointCloud2(header=header,\n\t\t\t\t\t\theight=1,\n\t\t\t\t\t\twidth=len(points),\n\t\t\t\t\t\tis_dense=False,\n\t\t\t\t\t\tis_bigendian=False,\n\t\t\t\t\t\tfields=fields,\n\t\t\t\t\t\tpoint_step=cloud_struct.size,\n\t\t\t\t\t\trow_step=cloud_struct.size * len(points),\n\t\t\t\t\t\tdata=buff.raw)", "def build_colored_pointcloud(pc, classes, random=True):\n\n #Create array with random colors\n if random:\n random_colors = (np.random.rand(classes.max()+1, 4)*255).astype(int)\n random_colors[:, 3] = 255\n else:\n random_colors = all_random_colors[0:classes.max()+1, :]\n\n #One hot encoding for points' classes\n one_hot_classes = np.zeros((classes.shape[0], classes.max()+1)).astype(int)\n one_hot_classes[np.arange(classes.shape[0]), classes] = 1\n\n #Assign colors\n pc_colors = np.dot(one_hot_classes, random_colors)\n\n return trimesh.points.PointCloud(vertices=pc, colors=pc_colors)", "def get_pointcloud(self, drivename, fname, dtype=str, ground_removed=False):\n\t\tbin_dir = join(self.DATASET_DIR, drivename, self.INPUT_BIN_DIR)\n\t\tfilename = join(bin_dir, fname.split(\".\")[0] + \".bin\")\n\t\tself.PC_ID = fname.split(\".\")[0]\n\t\tdata = np.fromfile(filename, dtype=np.float32)\n\t\tif ground_removed:\n\t\t\tfilename = join(self.DATASET_DIR, drivename, self.GROUND_REMOVED_DIR, fname.split(\".\")[0] + \".bin\")\n\t\t\tdata = np.fromfile(filename, dtype=np.float32)\t\n\t\tif dtype == str:\n\t\t\tdata = data.flatten(order=\"C\").tolist()\n\t\t\tdata_str = (\",\").join([str(x) for x in data])\n\t\t\treturn data_str\n\t\telse:\n\t\t\tif ground_removed:\n\t\t\t\treturn data.reshape((-1,4))\n\t\t\telse:\n\t\t\t\treturn data.reshape((-1,4))[:,:3]", "def get_hand_points(self, cloud):\n cloud_tree 
= cKDTree(cloud)\n search_radius = np.linalg.norm(np.array([self.image_depth, self.image_width, self.image_height]) / 2.0)\n indices = cloud_tree.query_ball_point(self.center, search_radius)\n points = cloud[indices, :]\n\n hTb = np.linalg.inv(self.T)\n workspace = [(-self.image_height/2, self.image_height/2),\n (-self.image_width/2, self.image_width/2),\n (-self.image_depth/2, self.image_depth/2)]\n points = point_cloud_util.transform(hTb, points)\n points = point_cloud_util.filter_workspace(workspace, points)\n\n return points", "def trimesh_from_point_cloud(cloud):\n points = np.asarray(cloud)\n hull = scipy.spatial.ConvexHull(points)\n hull = scipy.spatial.ConvexHull(points[hull.vertices])\n ru.transforms.counterclockwise_hull(hull)\n vertices = hull.points\n faces = hull.simplices\n return vertices, faces", "def test_get_mesh_grid_as_point_cloud_3x2rect() -> None:\n min_x = -3 # integer, minimum x-coordinate of 2D grid\n max_x = -1 # integer, maximum x-coordinate of 2D grid\n min_y = 2 # integer, minimum y-coordinate of 2D grid\n max_y = 3 # integer, maximum y-coordinate of 2D grid\n\n # return pts, a Numpy array of shape (N,2)\n pts = mesh_grid_utils.get_mesh_grid_as_point_cloud(\n min_x, max_x, min_y, max_y, downsample_factor=1.0\n )\n\n assert pts.shape == (6, 2)\n # fmt: off\n gt_pts: NDArrayFloat = np.array(\n [\n [-3.0, 2.0],\n [-2.0, 2.0],\n [-1.0, 2.0],\n [-3.0, 3.0],\n [-2.0, 3.0],\n [-1.0, 3.0]\n ])\n # fmt: on\n assert np.allclose(gt_pts, pts)", "def PointCloudfromStructOutput(self,file):\n print(\"Creating Structure Point Cloud\")\n xyz = self.readStructOutput(file)\n pc = np.zeros((int(len(xyz)/2.0),3))\n pc[:,0] = xyz[::2,0]*1000\n pc[:,1] = xyz[::2,1]*1000\n pc[:,2] = xyz[::2,2]*1000\n head = \"\"\"X,Y,Z\"\"\"\n np.savetxt(file, pc, delimiter=',',fmt='%.10f', header=head)\n return", "def getCartesianPointsImage(self, points):\n return getCartesianPointsImage(points, self)", "def PointCloudData(pdbid, chainid):\n pc = []\n bf = []\n resnames = []\n hets = []\n\n if not os.path.exists(os.getcwd() + '/' + filename):\n pdbl = PDB.PDBList()\n pdbl.retrieve_pdb_file(pdbid, False, os.getcwd(), 'pdb', True)\n parser = PDB.PDBParser(PERMISSIVE=1)\n structure = parser.get_structure(pdbid, 'pdb'+pdbid+'.ent')\n model = structure[0]\n chain = model[chainid]\n for residue in chain:\n for atom in residue:\n if atom.get_id() == \"CA\":\n resnames.append(residue.get_resname())\n bf.append(atom.get_bfactor())\n pc.append(atom.get_coord())\n pointcloud = np.asarray(pc)\n return pointcloud, bf, resnames", "def load_point_cloud(filename, min_norm_normal=1e-5, dtype=torch.float64):\n v, _, n = pcu.load_mesh_vfn(filename, dtype=np.float64)\n v, idx, _ = pcu.deduplicate_point_cloud(v, 1e-15, return_index=True) # Deduplicate point cloud when loading it\n n = n[idx]\n\n # Some meshes have non unit normals, so build a binary mask of points whose normal has a reasonable magnitude\n # We use this mask to remove bad vertices\n mask = np.linalg.norm(n, axis=-1) > min_norm_normal\n\n # Keep the good points and normals\n x = v[mask].astype(np.float64)\n n = n[mask].astype(np.float64)\n n /= np.linalg.norm(n, axis=-1, keepdims=True)\n\n return torch.from_numpy(x).to(dtype), torch.from_numpy(n).to(dtype)", "def get_clusters(self,points):\n self.points = points\n self.__dabest = [self.__cmeans(points,i) for i in range(self.__start,self.__end)]\n ##self.hull = \n return self.__dabest", "def test_get_mesh_grid_as_point_cloud_3x3square() -> None:\n min_x = -3 # integer, minimum x-coordinate of 2D grid\n 
max_x = -1 # integer, maximum x-coordinate of 2D grid\n min_y = 2 # integer, minimum y-coordinate of 2D grid\n max_y = 4 # integer, maximum y-coordinate of 2D grid\n\n # return pts, a Numpy array of shape (N,2)\n pts = mesh_grid_utils.get_mesh_grid_as_point_cloud(\n min_x, max_x, min_y, max_y, downsample_factor=1.0\n )\n\n assert pts.shape == (9, 2)\n gt_pts: NDArrayFloat = np.array(\n [\n [-3.0, 2.0],\n [-2.0, 2.0],\n [-1.0, 2.0],\n [-3.0, 3.0],\n [-2.0, 3.0],\n [-1.0, 3.0],\n [-3.0, 4.0],\n [-2.0, 4.0],\n [-1.0, 4.0],\n ]\n )\n\n assert np.allclose(gt_pts, pts)", "def __get_points_object__(xyz):\n # TODO: Need to fix this to handle all points, not just the first\n source = vtk.vtkPointSource()\n source.SetCenter(xyz[0])\n source.SetRadius(0)\n source.SetNumberOfPoints(1)\n source.Update()\n return source", "def obtain_points(self):\n # Swapaxes makes the output a column rather than a row\n return np.swapaxes(np.array([np.ndarray.flatten(self.data[\"Base\"][\"Zone1\"][\"GridCoordinates\"][\"CoordinateX\"][\" data\"][:, :, :]),\n np.ndarray.flatten(self.data[\"Base\"][\"Zone1\"][\"GridCoordinates\"][\"CoordinateY\"][\" data\"][:, :, :]),\n np.ndarray.flatten(self.data[\"Base\"][\"Zone1\"][\"GridCoordinates\"][\"CoordinateZ\"][\" data\"][:, :, :])]), 0, 1)", "def render_point_cloud(self, point_cloud, extrinsics=Pose(), color=GRAY):\n\n combined_transform = self._bev_rotation * extrinsics\n\n pointcloud_in_bev = combined_transform * point_cloud\n point_cloud2d = pointcloud_in_bev[:, :2]\n\n point_cloud2d[:, 0] = (self._center_pixel[0] + point_cloud2d[:, 0] * self._pixels_per_meter)\n point_cloud2d[:, 1] = (self._center_pixel[1] + point_cloud2d[:, 1] * self._pixels_per_meter)\n\n H, W = self.data.shape[:2]\n uv = point_cloud2d.astype(np.int32)\n in_view = np.logical_and.reduce([\n (point_cloud2d >= 0).all(axis=1),\n point_cloud2d[:, 0] < W,\n point_cloud2d[:, 1] < H,\n ])\n uv = uv[in_view]\n self.data[uv[:, 1], uv[:, 0], :] = color", "def render_point_cloud(frame_id):\n point_cloud_world_coordinates = get_point_cloud_world_coordinates(frame_id)\n # pptk\n v = pptk.viewer(point_cloud_world_coordinates)\n v.set(point_size=0.0001)\n v.color_map('cool', scale=[0, 5])", "def _build_point_cloud_graph(self):\n depths = self.depths_placeholder[tf.newaxis]\n images = self.images_placeholder[tf.newaxis]\n poses = self.poses_placeholder[tf.newaxis]\n intrinsics = self.intrinsics_placeholder[tf.newaxis]\n intrinsics = intrinsics_vec_to_matrix(intrinsics)\n\n depths_pad = tf.pad(depths, [[0,0],[0,0],[0,1],[0,1]], \"CONSTANT\")\n\n depths_grad = \\\n (depths_pad[:, :, 1:, :-1] - depths_pad[:, :, :-1, :-1])**2 + \\\n (depths_pad[:, :, :-1, 1:] - depths_pad[:, :, :-1, :-1])**2\n\n # don't use large depths for point cloud and ignore boundary regions\n valid = (depths < 3.0) & (depths_grad < 0.001)\n\n batch, num, ht, wd = tf.unstack(tf.shape(depths), num=4)\n Ts = VideoSE3Transformation(matrix=poses)\n X0 = projective_ops.backproject(depths, intrinsics)\n \n # transform point cloud into world coordinates\n X1 = Ts.inv()(X0)\n\n crop_h = 12\n crop_w = 32\n\n X1 = X1[:, :, crop_h:-crop_h, crop_w:-crop_w]\n valid = valid[:, :, crop_h:-crop_h, crop_w:-crop_w]\n images = images[:, :, crop_h:-crop_h, crop_w:-crop_w, ::-1]\n \n X1 = tf.reshape(X1, [-1, 3])\n colors = tf.reshape(images, [-1, 3])\n\n valid_inds = tf.where(tf.reshape(valid, [-1]))\n valid_inds = tf.reshape(valid_inds, [-1])\n\n X1 = tf.gather(X1, valid_inds, axis=0)\n colors = tf.gather(colors, valid_inds, axis=0)\n\n self.outputs['point_cloud'] = 
(X1, colors)", "def test_get_mesh_grid_as_point_cloud_downsample() -> None:\n min_x = -3 # integer, minimum x-coordinate of 2D grid\n max_x = 0 # integer, maximum x-coordinate of 2D grid\n min_y = 2 # integer, minimum y-coordinate of 2D grid\n max_y = 5 # integer, maximum y-coordinate of 2D grid\n\n # return pts, a Numpy array of shape (N,2)\n pts = mesh_grid_utils.get_mesh_grid_as_point_cloud(\n min_x, max_x, min_y, max_y, downsample_factor=3.0\n )\n\n assert pts.shape == (4, 2)\n\n # fmt: off\n gt_pts: List[List[float]] = [\n [-3.0, 2.0],\n [0.0, 2.0],\n [-3.0, 5.0],\n [0.0, 5.0]\n ]\n # fmt: on\n assert np.allclose(gt_pts, pts)", "def debug_filter_points(self, points):\n cloud_msg = PointCloud2()\n cloud_msg.header.frame_id = \"map\"\n cloud_msg.header.stamp = rospy.Time.now() \n xyz = [[p.pose.position.x, p.pose.position.y, p.pose.position.z] for p in points] \n point_cloud = pc2.create_cloud_xyz32(cloud_msg.header, xyz)\n self._points_publisher.publish(point_cloud)", "def filterPoints(self, pcd):\n\n minX, minY, minZ = self.min_point\n maxX, maxY, maxZ = self.max_point\n npCloud = np.asarray(pcd.points)\n\n result = npCloud[np.where((npCloud[:,0] < maxX) & (npCloud[:,0] > minX) \n & (npCloud[:,1] < maxY) & (npCloud[:,1] > minY)\n & (npCloud[:,2] < maxZ) & (npCloud[:,2] > minZ))[0]]\n\n return npToPcd(result)", "def get_points(self):\n try:\n return self.current_3D_points\n except:\n print('no such current_image')", "def points(self):\n return self._arr.T.ravel().view(\n dtype=[('x', self.dtype), ('y', self.dtype), ('z', self.dtype)])", "def get_segmented_point_clouds(seg_masks, depth): \n obj_labels = np.unique(seg_masks)\n num_objs = obj_labels.shape[0]+1\n rows, cols = seg_masks.shape\n cm = plt.get_cmap('gist_rainbow')\n colors = [cm(1. * i/num_objs) for i in range(num_objs)]\n \n object_dict = {}\n # key - object label; val - depth array of that object\n for i in obj_labels:\n object_dict[i] = np.zeros((rows,cols), dtype = np.float32)\n\n for i in range(rows):\n for j in range(cols):\n if seg_masks[i][j] != 0 and seg_masks[i][j] != -1:\n object_dict[seg_masks[i][j]][i][j] = depth[i][j]\n \n segmented_pcds = []\n for key, val in object_dict.items():\n if key == -1 or key == 0:\n continue\n img = o3d.geometry.Image(val)\n pcd_from_depth = o3d.geometry.PointCloud.create_from_depth_image(\n img,\n o3d.camera.PinholeCameraIntrinsic(\n o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault))\n\n # Multiply with Transformation matrix to get correct view of the PCD\n pcd_from_depth.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])\n pcd_from_depth.paint_uniform_color(np.array(colors[key][:3], dtype = np.uint8) * 255)\n segmented_pcds.append(pcd_from_depth)\n return segmented_pcds", "def load_point_cloud(self, filename):\n f = sio.loadmat(filename)\n data = f['blob'][:]\n data -= np.mean(data, 0)\n data /= np.amax(abs(data))\n label = DataHandler.get_label_from_filename(filename)\n if self.use_softmax:\n l = np.zeros([2])\n l[label] = 1\n label = l\n return data, label", "def get_clipped_pointcloud(pointcloud, boundary):\r\n\tassert (pointcloud.shape[0]>=2)\r\n\tpointcloud = pointcloud[:,np.logical_and(pointcloud[0,:]<boundary[1], pointcloud[0,:]>boundary[0])]\r\n\tpointcloud = pointcloud[:,np.logical_and(pointcloud[1,:]<boundary[3], pointcloud[1,:]>boundary[2])]\r\n\treturn pointcloud", "def get_pointcloud(dataset, NUM_POINT=2048, shuffle=True):\n if dataset == 'modelnet':\n train_file_idxs = np.arange(0, len(TRAIN_FILES_MODELNET))\n data_train = []\n 
label_train = []\n for fn in range(len(TRAIN_FILES_MODELNET)):\n print('----' + str(fn) + '-----')\n current_data, current_label = provider.loadDataFile(TRAIN_FILES_MODELNET[fn])\n current_data = current_data[:,0:NUM_POINT,:]\n current_label = np.squeeze(current_label)\n data_train.append(current_data)\n label_train.append(current_label)\n result_train = np.vstack(data_train)\n label_train = np.concatenate(label_train, axis=None)\n if shuffle:\n X_train, y_train, _ = provider.shuffle_data(result_train, np.squeeze(label_train)) \n else:\n X_train, y_train = result_train, np.squeeze(label_train)\n \n data_test = []\n label_test = []\n for fn in range(len(TEST_FILES_MODELNET)):\n print('----' + str(fn) + '-----')\n current_data, current_label = provider.loadDataFile(TEST_FILES_MODELNET[fn])\n current_data = current_data[:,0:NUM_POINT,:]\n current_label = np.squeeze(current_label)\n data_test.append(current_data)\n label_test.append(current_label)\n result_test = np.vstack(data_test)\n label_test = np.concatenate(label_test, axis=None)\n if shuffle:\n X_test, y_test, _ = provider.shuffle_data(result_test, np.squeeze(label_test))\n else:\n X_test, y_test = result_test, np.squeeze(label_test)\n elif dataset == 'shapenet':\n shapenet_data, shapenet_label = provider.get_shapenet_data()\n shapenet_data = shapenet_data[:,0:NUM_POINT,:]\n X_train, X_test, y_train, y_test = train_test_split(shapenet_data, shapenet_label, test_size=0.2, random_state=42, shuffle=shuffle)\n elif dataset == 'shapenet_chair':\n shapenet_data, shapenet_label = provider.get_shapenet_data()\n shapenet_data = shapenet_data[:,0:NUM_POINT,:]\n shapenet_data, shapenet_label = shapenet_data[shapenet_label==17], shapenet_label[shapenet_label==17]\n X_train, X_test, y_train, y_test = train_test_split(shapenet_data, shapenet_label, test_size=0.2, random_state=42, shuffle=shuffle)\n elif dataset == 'modelnet10':\n current_data, current_label = provider.loadDataFile(MODELNET10_TRAIN_FILE)\n current_data = current_data[:,0:NUM_POINT,:]\n if shuffle:\n current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label)) \n current_label = np.squeeze(current_label)\n X_train, y_train = current_data, current_label\n\n current_data, current_label = provider.loadDataFile(MODELNET10_TEST_FILE)\n current_data = current_data[:,0:NUM_POINT,:]\n if shuffle:\n current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label)) \n current_label = np.squeeze(current_label)\n X_test, y_test = current_data, current_label\n elif dataset == 'keypoint':\n current_data, current_label = provider.load_mat_keypts(TRAIN_CHAIR_FILES, KEYPOINT_CHAIR_PATH, NUM_POINT)\n if shuffle:\n current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label)) \n for i in range(current_data.shape[0]): # shuffle order of points in a single model, otherwise keypoints are always at the end\n idx = np.arange(current_data.shape[1])\n np.random.shuffle(idx)\n current_data = current_data[:, idx, :]\n current_label = current_label[:, idx]\n current_label = np.squeeze(current_label)\n X_train, y_train = current_data, current_label\n\n current_data, current_label = provider.load_mat_keypts(TEST_CHAIR_FILES, KEYPOINT_CHAIR_PATH, NUM_POINT)\n if shuffle:\n current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label)) \n for i in range(current_data.shape[0]):\n idx = np.arange(current_data.shape[1])\n np.random.shuffle(idx)\n current_data = current_data[:, idx, :]\n 
current_label = current_label[:, idx]\n current_label = np.squeeze(current_label)\n X_test, y_test = current_data, current_label\n elif dataset == 'keypoint_10class':\n current_data, current_label = provider.load_mat_keypts(TRAIN_CHAIR_FILES, KEYPOINT_CHAIR_PATH, NUM_POINT)\n current_label[:, -10:] = np.arange(1, 11)\n if shuffle:\n current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label)) \n for i in range(current_data.shape[0]): # shuffle order of points in a single model, otherwise keypoints are always at the end\n idx = np.arange(current_data.shape[1])\n np.random.shuffle(idx)\n current_data = current_data[:, idx, :]\n current_label = current_label[:, idx]\n current_label = np.squeeze(current_label)\n X_train, y_train = current_data, current_label\n\n current_data, current_label = provider.load_mat_keypts(TEST_CHAIR_FILES, KEYPOINT_CHAIR_PATH, NUM_POINT)\n current_label[:, -10:] = np.arange(1, 11)\n if shuffle:\n current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label)) \n for i in range(current_data.shape[0]):\n idx = np.arange(current_data.shape[1])\n np.random.shuffle(idx)\n current_data = current_data[:, idx, :]\n current_label = current_label[:, idx]\n current_label = np.squeeze(current_label)\n X_test, y_test = current_data, current_label\n elif dataset == \"keypointnet\":\n json_path = osp.join(KEYPOINTNET_PATH, \"annotations/all.json\")\n annots = json.load(open(json_path))\n X = []\n y = []\n for annot in annots:\n class_id = annot[\"class_id\"]\n model_id = annot[\"model_id\"]\n kpts = []\n for kpt in annot[\"keypoints\"]:\n kpts.append(kpt[\"xyz\"])\n pcd_path = osp.join(KEYPOINTNET_PATH, f\"pcds/{class_id}/{model_id}.pcd\")\n if os.path.exists(pcd_path):\n pcd = naive_read_pcd(pcd_path)\n pcd = pcd[0:NUM_POINT, :]\n else:\n continue\n if len(kpts) != 10:\n continue\n pcd = np.concatenate((pcd[:-10], kpts))\n label = np.zeros(NUM_POINT-10)\n label = np.concatenate((label, np.ones(10)))\n X.append(pcd)\n y.append(label)\n current_data = np.array(X)\n current_label = np.array(y)\n if False and shuffle:\n current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label)) \n for i in range(current_data.shape[0]): # shuffle order of points in a single model, otherwise keypoints are always at the end\n idx = np.arange(current_data.shape[1])\n np.random.shuffle(idx)\n current_data = current_data[:, idx, :]\n current_label = current_label[:, idx]\n current_label = np.squeeze(current_label)\n X_train, X_test, y_train, y_test = train_test_split(current_data, current_label, test_size=0.2, random_state=42, shuffle=shuffle)\n else:\n raise NotImplementedError()\n print(f'Dataset name: {dataset}')\n print(f'X_train: {X_train.shape}')\n print(f'X_test: {X_test.shape}')\n print(f'y_train: {y_train.shape}')\n print(f'y_test: {y_test.shape}')\n return X_train, X_test, y_train, y_test", "def GetPoints(self):\n if not self.VTKObject.GetPoints():\n return None\n array = vtkDataArrayToVTKArray(\n self.VTKObject.GetPoints().GetData(), self)\n array.Association = ArrayAssociation.POINT\n return array", "def image_to_point_cloud(depth):\n cx, cy, fx, fy = 5.8818670481438744*100, 3.1076280589210484*100, 5.8724220649505514*100, 2.2887144980135292*100\n depth = depth[:,:,0]\n\n rows, cols = depth.shape\n c, r = np.meshgrid(np.arange(cols), np.arange(rows), sparse=True)\n valid = (depth > 0) & (depth < 255)\n # print(valid)\n z = np.where(valid, depth / 256.0, np.nan)\n x = np.where(valid, z * (c - 
cx) / fx, 0)\n y = np.where(valid, z * (r - cy) / fy, 0)\n return np.dstack((x, y, z))", "def project(self, point_cloud, round_px=True):\n if not isinstance(point_cloud, PointCloud) and not (isinstance(point_cloud, Point) and point_cloud.dim == 3):\n raise ValueError('Must provide PointCloud or 3D Point object for projection')\n if point_cloud.frame != self._frame:\n raise ValueError('Cannot project points in frame %s into camera with frame %s' %(point_cloud.frame, self._frame))\n\n points_proj = self.S.dot(point_cloud.data) + self.t\n if len(points_proj.shape) == 1:\n points_proj = points_proj[:, np.newaxis]\n point_depths = np.tile(points_proj[2,:], [3, 1])\n points_proj = np.divide(points_proj, point_depths)\n if round_px:\n points_proj = np.round(points_proj)\n\n if isinstance(point_cloud, Point):\n return Point(data=points_proj[:2,:].astype(np.int16), frame=self._frame)\n return ImageCoords(data=points_proj[:2,:].astype(np.int16), frame=self._frame)", "def __splitPointCloud(self):\n x = self.pointcloud[:, 0]\n y = self.pointcloud[:, 1]\n ones = np.matrix(np.ones(x.shape[0])).T\n values = (np.hstack((x, ones)) * self.line_params) - y\n # Return two sets of points based on the sign\n values = np.array(values).ravel()\n P1 = np.where(values < 0)[0]\n P2 = np.where(values > 0)[0]\n return P1, P2", "def augment_cloud(Ps):\n \"Augmented params:\"\n pc_augm_scale=0\n pc_augm_rot=1\n pc_augm_mirror_prob=0.5\n pc_augm_jitter=0\n\n M = transforms3d.zooms.zfdir2mat(1)\n if pc_augm_scale > 1:\n s = random.uniform(1/pc_augm_scale, pc_augm_scale)\n M = np.dot(transforms3d.zooms.zfdir2mat(s), M)\n if pc_augm_rot:\n angle = random.uniform(0, 2*math.pi)\n M = np.dot(transforms3d.axangles.axangle2mat([0,0,1], angle), M) # z=upright assumption\n if pc_augm_mirror_prob > 0: # mirroring x&y, not z\n if random.random() < pc_augm_mirror_prob/2:\n M = np.dot(transforms3d.zooms.zfdir2mat(-1, [1,0,0]), M)\n if random.random() < pc_augm_mirror_prob/2:\n M = np.dot(transforms3d.zooms.zfdir2mat(-1, [0,1,0]), M)\n result = []\n for P in Ps:\n P[:,:3] = np.dot(P[:,:3], M.T)\n\n if pc_augm_jitter:\n sigma, clip= 0.01, 0.05 # https://github.com/charlesq34/pointnet/blob/master/provider.py#L74\n P = P + np.clip(sigma * np.random.randn(*P.shape), -1*clip, clip).astype(np.float32)\n result.append(P)\n return result", "def clustering(pcd: o3d.geometry.PointCloud):\n with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug) as cm:\n labels = np.array(pcd.cluster_dbscan(eps=1, min_points=30, print_progress=True))\n\n max_label = labels.max()\n print(f\"point cloud has {max_label + 1} clusters\")\n colors = plt.get_cmap(\"tab20b\")(labels / (max_label if max_label > 0 else 1))\n colors[labels < 0] = 0\n pcd.colors = o3d.utility.Vector3dVector(colors[:, :3])\n return pcd, labels", "def get_points(self):\r\n return self.points", "def create_cloud_xyz32rgb(header, points):\n fields = [PointField('x', 0, PointField.FLOAT32, 1),\n PointField('y', 4, PointField.FLOAT32, 1),\n PointField('z', 8, PointField.FLOAT32, 1),\n PointField('rgb', 12, PointField.UINT32, 1)]\n return pcl2.create_cloud(header, fields, points)", "def show_pointclouds(points, colors, text=[], title=\"Default\", png_path=\"\", interactive=True, orientation='horizontal'):\n\n # make sure pointclouds is a list\n assert isinstance(points, type([])), \\\n \"Pointclouds argument must be a list\"\n\n # make sure colors is a list\n assert isinstance(colors, type([])), \\\n \"Colors argument must be a list\"\n\n # make sure number of pointclouds and 
colors are the same\n assert len(points) == len(colors), \\\n \"Number of pointclouds (%d) is different then number of colors (%d)\" % (len(points), len(colors))\n\n while len(text) < len(points):\n text.append(\"\")\n\n # Number of pointclouds to be displayed in this window\n num_pointclouds = len(points)\n\n point_size = 10\n pointclouds = [VtkPointCloud(point_size) for _ in range(num_pointclouds)]\n renderers = [vtk.vtkRenderer() for _ in range(num_pointclouds)]\n\n height = 1.0 / max(num_pointclouds, 1)\n viewports = [(i*height, (i+1)*height) for i in range(num_pointclouds)]\n #print(viewports)\n\n # iterate over all point clouds\n for i, pc in enumerate(points):\n pc = pc.squeeze()\n co = colors[i].squeeze()\n assert pc.shape[0] == co.shape[0], \\\n \"expected same number of points (%d) then colors (%d), cloud index = %d\" % (pc.shape[0], co.shape[0], i)\n assert pc.shape[1] == 3, \"expected points to be N x 3, got N x %d\" % pc.shape[1]\n assert co.shape[1] == 3, \"expected colors to be N x 3, got N x %d\" % co.shape[1]\n\n # for each point cloud iterate over all points\n for j in range(pc.shape[0]):\n point = pc[j, :]\n color = co[j, :]\n pointclouds[i].add_point(point, color)\n\n renderers[i].AddActor(pointclouds[i].vtkActor)\n # renderers[i].AddActor(vtk.vtkAxesActor())\n renderers[i].SetBackground(1.0, 1.0, 1.0)\n if orientation == 'horizontal':\n print(viewports[i][0])\n renderers[i].SetViewport(viewports[i][0], 0.0, viewports[i][1], 1.0)\n elif orientation == 'vertical':\n renderers[i].SetViewport(0.0, viewports[i][0], 1.0, viewports[i][1])\n else:\n raise Exception('Not a valid orientation!')\n renderers[i].ResetCamera()\n\n # Add circle to first render\n renderers[0].AddActor(getActorCircle())\n renderers[0].AddActor(getActorCircle(50, 49, color=(0, 1, 0)))\n\n # Text actors\n text_actors = [vtk.vtkTextActor() for _ in text]\n for i, ta in enumerate(text_actors):\n if orientation == 'horizontal':\n ta.SetInput(' ' + text[i])\n elif orientation == 'vertical':\n ta.SetInput(text[i] + '\\n\\n\\n\\n\\n\\n')\n else:\n raise Exception('Not a valid orientation!')\n txtprop = ta.GetTextProperty()\n txtprop.SetFontFamilyToArial()\n txtprop.SetFontSize(0)\n txtprop.SetColor(0, 0, 0)\n # txtprop.SetJustificationToCentered()\n # ta.SetDisplayPosition(500, 10)\n # ta.SetAlignmentPoint()\n renderers[i].AddActor(ta)\n\n # Render Window\n render_window = vtk.vtkRenderWindow()\n for renderer in renderers:\n render_window.AddRenderer(renderer)\n\n render_window_interactor = vtk.vtkRenderWindowInteractor()\n render_window_interactor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())\n render_window_interactor.SetRenderWindow(render_window)\n\n [center_x, center_y, center_z] = np.mean(points[0].squeeze(), axis=0)\n camera = vtk.vtkCamera()\n # d = 10\n # camera.SetViewUp(0, -1, 0)\n\n # camera.SetPosition(center_x + d, center_y + d, center_z + d / 2)\n # camera.SetFocalPoint(center_x, center_y, center_z)\n # camera.SetFocalPoint(0, 0, 0)\n\n camera.SetViewUp(0, 0, 1)\n if orientation == 'horizontal':\n camera.SetPosition(3, -10, 2)\n camera.SetFocalPoint(3, 1.5, 1.5)\n elif orientation == 'vertical':\n camera.SetPosition(1.5, -6, 2)\n camera.SetFocalPoint(1.5, 1.5, 1.5)\n else:\n raise Exception('Not a valid orientation!')\n\n camera.SetClippingRange(0.002, 1000)\n for renderer in renderers:\n renderer.SetActiveCamera(camera)\n\n # Begin Interaction\n render_window.Render()\n render_window.SetWindowName(title)\n if orientation == 'horizontal':\n render_window.SetSize(1940, 720)\n elif 
orientation == 'vertical':\n render_window.SetSize(600, 1388)\n else:\n raise Exception('Not a valid orientation!')\n\n if interactive:\n render_window_interactor.Start()\n\n if png_path:\n # screenshot code:\n w2if = vtk.vtkWindowToImageFilter()\n w2if.SetInput(render_window)\n w2if.Update()\n\n writer = vtk.vtkPNGWriter()\n writer.SetFileName(png_path)\n writer.SetInputConnection(w2if.GetOutputPort())\n writer.Write()", "def project_to_image(self, point_cloud, round_px=True):\n if not isinstance(point_cloud, PointCloud) and not (isinstance(point_cloud, Point) and point_cloud.dim == 3):\n raise ValueError('Must provide PointCloud or 3D Point object for projection')\n if point_cloud.frame != self._frame:\n raise ValueError('Cannot project points in frame %s into camera with frame %s' %(point_cloud.frame, self._frame))\n\n points_proj = self.S.dot(point_cloud.data) + self.t\n if len(points_proj.shape) == 1:\n points_proj = points_proj[:, np.newaxis]\n point_depths = points_proj[2,:]\n point_z = np.tile(point_depths, [3, 1])\n points_proj = np.divide(points_proj, point_z)\n if round_px:\n points_proj = np.round(points_proj)\n points_proj = points_proj[:2,:].astype(np.int16)\n\n valid_ind = np.where((points_proj[0,:] >= 0) & \\\n (points_proj[1,:] >= 0) & \\\n (points_proj[0,:] < self.width) & \\\n (points_proj[1,:] < self.height))[0]\n\n depth_data = np.zeros([self.height, self.width])\n depth_data[points_proj[1,valid_ind], points_proj[0,valid_ind]] = point_depths[valid_ind]\n return DepthImage(depth_data, frame=self.frame)", "def get_points(self):\n\t\treturn self.points", "def voxelize(self, points):\n voxels, coors, num_points, voxel_centers = [], [], [], []\n for res in points:\n res_voxels, res_coors, res_num_points = self.voxel_layer(res)\n res_voxel_centers = (\n res_coors[:, [2, 1, 0]] + 0.5) * res_voxels.new_tensor(\n self.voxel_layer.voxel_size) + res_voxels.new_tensor(\n self.voxel_layer.point_cloud_range[0:3])\n voxels.append(res_voxels)\n coors.append(res_coors)\n num_points.append(res_num_points)\n voxel_centers.append(res_voxel_centers)\n\n voxels = torch.cat(voxels, dim=0)\n num_points = torch.cat(num_points, dim=0)\n voxel_centers = torch.cat(voxel_centers, dim=0)\n coors_batch = []\n for i, coor in enumerate(coors):\n coor_pad = F.pad(coor, (1, 0), mode='constant', value=i)\n coors_batch.append(coor_pad)\n coors_batch = torch.cat(coors_batch, dim=0)\n\n voxel_dict = dict(\n voxels=voxels,\n num_points=num_points,\n coors=coors_batch,\n voxel_centers=voxel_centers)\n return voxel_dict", "def drawPointCloud(points, ax, color=None):\n if len(points.shape) != 2 or points.shape[0] != 3:\n raise ValueError(\"'points' must be 3xN\")\n if color == None:\n color = __color_cycle.next()\n elif color in (0, 1, 2):\n color = points[color, :]\n ax.scatter(points[0,:].T, points[1,:].T, points[2,:].T, c=color)", "def points(self):\n p = []\n for v in self.iter():\n p.append((v.x, v.y))\n return p", "def convert_to_point_cloud2(cloud):\n header = Header()\n header.frame_id = \"base_link\"\n header.stamp = rospy.Time.now()\n return point_cloud2.create_cloud_xyz32(header, cloud)", "def points(self):\n return np.vstack((self.x(), self.y()))", "def save_3d_render(\r\n self, points: List[np.ndarray], colors: List[np.ndarray]\r\n ) -> None:\r\n pcd = o3d.geometry.PointCloud()\r\n pcd.points = o3d.utility.Vector3dVector(np.vstack(points).astype(np.float64))\r\n pcd.colors = o3d.utility.Vector3dVector(np.vstack(colors))\r\n if self.debug:\r\n o3d.visualization.draw_geometries([pcd])\r\n if not 
self.debug:\r\n o3d.io.write_point_cloud(f\"results/{self.filename[:-4]}.ply\", pcd)", "def projection(self, point):\n return gs.copy(point)", "def project(self):\n def _project(point):\n return (\n point[0]/(point[2]/Window.COP_DISTANCE+1),\n point[1]/(point[2]/Window.COP_DISTANCE+1))\n\n self._points = [list(map(_project, face)) for face in self._points]", "def pointclouds_to_voxelgrids(pointclouds, resolution, origin=None, scale=None, return_sparse=False):\n if not isinstance(resolution, int):\n raise TypeError(f\"Expected resolution to be int \"\n f\"but got {type(resolution)}.\")\n\n if origin is None:\n min_val = torch.min(pointclouds, dim=1)[0]\n origin = min_val\n\n if scale is None:\n max_val = torch.max(pointclouds, dim=1)[0]\n scale = torch.max(max_val - origin, dim=1)[0]\n\n # Normalize pointcloud with origin and scale\n pointclouds = (pointclouds - origin.unsqueeze(1)) / scale.view(-1, 1, 1)\n\n vg = _base_points_to_voxelgrids(pointclouds, resolution, return_sparse=return_sparse)\n\n return vg", "def rotate_point_cloud(data):\n rotated_data = np.zeros(data.shape, dtype=np.float32)\n for k in xrange(data.shape[0]):\n rotation_angle = np.random.uniform() * 2 * np.pi\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix = np.array([[cosval, 0, sinval],\n [0, 1, 0],\n [-sinval, 0, cosval]])\n shape_pc = data[k, ...]\n rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)\n return rotated_data", "def __getslice__(self, *args):\n return _itkSurfaceSpatialObjectPointPython.vectoritkSurfaceSpatialObjectPoint3___getslice__(self, *args)", "def plot(self, subsample=None, valid_instance_types=None):\n pt = self.sample_indices(subsample, valid_instance_types)\n\n x, y, z = self.pc[pt, 0], self.pc[pt, 1], self.pc[pt, 2]\n color = self.color[pt]\n\n return plot_pointcloud(x, y, z, color=color)", "def initialise_particle_cloud(self, initialpose):\n # ----- Initialize the particle cloud as an empty array\n self.particlecloud = PoseArray()\n\n \"\"\"Create the noise to multiply by the random Gaussian number that will\n get added to each of the Poses, that are set to a random position\n and orientation around the initial pose\"\"\"\n sensorSigma=3 #variance\n sensorMu=0 #mean\n noise=sensorSigma * numpy.random.randn() + sensorMu\n\n \"\"\"Create a range for the ammount of random Gaussian values to generate \"\"\"\n randomGauss = 10*self.NUMBER_PREDICTED_READINGS\n\n gaussianRandomNumX = []\n gaussianRandomNumY = []\n randomYawArray = []\n\n for i in range (0,randomGauss):\n gaussianRandomNumX.append(random.gauss(0,1))\n gaussianRandomNumY.append(random.gauss(0,1))\n x=random.randint(1,180)\n randomYaw=(math.pi/x)\n randomYawArray.append(randomYaw)\n\n iterator = 0\n\n \"\"\"\n\t Set the particles to a random position and orientation around the initial pose\n \"\"\"\n particleNumber = 10**2 # 10**3 # 10**4 # 10**5 experiment with different ammounts of particles\n\n while iterator < particleNumber:\n particle = Pose()\n particle.position.x = initialpose.pose.pose.position.x + (gaussianRandomNumX[iterator] * noise)\n particle.position.y = initialpose.pose.pose.position.y + (gaussianRandomNumY[iterator] * noise)\n particle.position.z = initialpose.pose.pose.position.z\n particle.orientation = rotateQuaternion(initialpose.pose.pose.orientation, randomYawArray[iterator])\n\n self.particlecloud.poses.append(particle)\n iterator += 1\n\n return self.particlecloud", "def plotrgcloud(self):\n print self.kpunten\n for i in range(len(self.kpunten[0])):\n 
self.writetext('sen ='+ self.kpunten[0][i][0], (0.65,0.85), axnum = 0, hor = None ,ver = None , rot = None ,fs =14 , transform = self.fig.axes[0].transAxes)\n if i == len(self.kpunten[0]) -1 :\n end = None\n else:\n end = self.kpunten[0][i+1][1] + 1\n print end\n self.plotrgwrap( self.rgindex,2*self.reader.npair+self.rgindex,'real part of rgvars (a.u)' , 'imaginary part of rgvars (a.u.)', tit ='RG vars g = %f all states'%(self.chardata) , begin = self.kpunten[0][i][1] , stop = end , name = 'cpcloud'+ self.kpunten[0][i][0] , filenum = 0)", "def get_cpoints(self, viewer, points=None, no_rotate=False):\n # If points are passed, they are assumed to be in data space\n if points is None:\n points = self.path.get_points()\n\n return viewer.tform['data_to_plot'].to_(points)", "def get_3d_points(preds_3d):\n for i,p in enumerate(preds_3d):\n preds_3d[i] = preds_3d[i] - preds_3d[i].mean(0)*np.ones((16,1));\n return preds_3d;", "def preprocess_point_cloud(pcd, voxel_size,\n radius_normal=None,\n radius_feature=None):\n # # print(\":: Downsample with a voxel size %.3f.\" % voxel_size)\n pcd_down = pcd.voxel_down_sample(voxel_size)\n\n if radius_normal is None:\n radius_normal = voxel_size * 2.0\n if radius_feature is None:\n radius_feature = voxel_size * 5.0\n\n # print(\":: Estimate normal with search radius %.3f.\" % radius_normal)\n pcd_down.estimate_normals(\n open3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30))\n\n # print(\":: Compute FPFH feature with search radius %.3f.\" % radius_feature)\n pcd_fpfh = open3d.registration.compute_fpfh_feature(\n pcd_down,\n open3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100))\n return pcd_down, pcd_fpfh", "def get_points(self):\n\t\treturn self._points", "def vertices(self):\n return self.pointlist", "def pointcloud2_to_xyz_array(cloud_msg, remove_nans=True):\n return get_xyz_points(pointcloud2_to_array(cloud_msg), remove_nans=remove_nans)", "def convert_depth_frame_to_pointcloud(depth_image, camera_intrinsics ):\r\n\t\r\n\t[height, width] = depth_image.shape\r\n\r\n\tnx = np.linspace(0, width-1, width)\r\n\tny = np.linspace(0, height-1, height)\r\n\tu, v = np.meshgrid(nx, ny)\r\n\tx = (u.flatten() - camera_intrinsics.ppx)/camera_intrinsics.fx\r\n\ty = (v.flatten() - camera_intrinsics.ppy)/camera_intrinsics.fy\r\n\r\n\tz = depth_image.flatten() / 1000;\r\n\tx = np.multiply(x,z)\r\n\ty = np.multiply(y,z)\r\n\r\n\tx = x[np.nonzero(z)]\r\n\ty = y[np.nonzero(z)]\r\n\tz = z[np.nonzero(z)]\r\n\r\n\treturn x, y, z", "def createInputPointCloud(side_size:int, center_x:int, center_y:int, nb_pts:int):\n in_pts = np.zeros(shape=(nb_pts,2), dtype=np.float32)\n side_nb_pts = nb_pts / 4\n ds = side_size / side_nb_pts\n for i in range(nb_pts):\n if i < side_nb_pts:\n in_pts[i][0] = center_x + i * ds - side_size * 0.5\n in_pts[i][1] = center_y + side_size / 2\n elif i < 2 * side_nb_pts:\n in_pts[i][0] = center_x + side_size / 2\n in_pts[i][1] = center_y + (i - 1*side_nb_pts) * ds - side_size * 0.5\n elif i < 3 * side_nb_pts:\n in_pts[i][0] = center_x + (i - 2*side_nb_pts) * ds - side_size * 0.5\n in_pts[i][1] = center_y - side_size / 2\n else:\n in_pts[i][0] = center_x - side_size / 2\n in_pts[i][1] = center_y + (i - 3*side_nb_pts) * ds - side_size * 0.5\n return in_pts", "def convert_pointcloud_to_depth(pointcloud, camera_intrinsics):\r\n\r\n\tassert (pointcloud.shape[0] == 3)\r\n\tx_ = pointcloud[0,:]\r\n\ty_ = pointcloud[1,:]\r\n\tz_ = pointcloud[2,:]\r\n\r\n\tm = x_[np.nonzero(z_)]/z_[np.nonzero(z_)]\r\n\tn = 
y_[np.nonzero(z_)]/z_[np.nonzero(z_)]\r\n\r\n\tx = m*camera_intrinsics.fx + camera_intrinsics.ppx\r\n\ty = n*camera_intrinsics.fy + camera_intrinsics.ppy\r\n\r\n\treturn x, y", "def projectPoints(self, points):\n return [self.projectPoint(point) for point in points]", "def merge(pointclouds, pctype=PointCloud):\n sizes = [len(pc) for pc in pointclouds]\n arr = np.empty((3, sum(sizes)), dtype=_DTYPE)\n \n # Build up array from pcs\n i = 0\n for pc, size in zip(pointclouds, sizes):\n j = i + size\n arr[:,i:j] = pc.arr\n i = j\n return pctype(arr)", "def features(self, mask=None, propnames=None):\n\t\t\n\t\t# See if we have a cached result\n\t\tif self._features:\n\t\t\treturn self._features\n\t\t\n\t\tresult = {'type': 'FeatureCollection', 'features':[]}\n\t\tfeatures = []\n\t\t\t\t\t\t\t\t\t\t\n\t\t# We can dealt with grid type collections first\n\t\tif self.featuretype in ['Grid', 'GridSeries']:\n\t\t\t\n\t\t\t# Get center point latitudes and longitudes\n\t\t\tlatitudes = self.latitudes\n\t\t\tlongitudes = self.longitudes\n\t\t\tshape = latitudes.shape\n\t\t\t\n\t\t\t# How do we slice the data to get grid point values?\n\t\t\tindex = 0\n\t\t\tfor dim in self.variable.dimensions:\n\t\t\t\tprint dim, dim.length, len(self.times)\n\t\t\t\tif dim.length == shape[0]:\n\t\t\t\t\ty_index = index\n\t\t\t\tif dim.length == shape[1]:\n\t\t\t\t\tx_index = index\n\t\t\t\tif dim.length == len(self.times):\n\t\t\t\t\tt_index = index\n\t\t\t\tindex += 1\n\t\t\t\n\t\t\t\n\t\t\t# Create the initial slices with indices defaulting to 0\n\t\t\tslices = [0]*len(self.variable.dimensions)\n\t\t\tslices[t_index] = slice(0,len(self.times))\n\n\t\t\t\t\t\t\n\t\t\t# Create corner point latitude longitude arrays\n\t\t\tcorner_lats = numpy.zeros((shape[0]+1, shape[1]+1))\n\t\t\tcorner_lons = numpy.zeros((shape[0]+1, shape[1]+1))\n\t\t\t\t\t\t\n\t\t\t# Step through all the interior grid points\n\t\t\tfor y in range(1, shape[0]):\n\t\t\t\tfor x in range(1, shape[1]):\n\t\t\t\t\tcorner_lats[y,x] = (latitudes[y, x-1] + latitudes[y,x] + latitudes[y-1,x-1] + latitudes[y-1,x])/4\n\t\t\t\t\tcorner_lons[y,x] = (longitudes[y, x-1] + longitudes[y,x] + longitudes[y-1,x-1] + longitudes[y-1,x])/4\n\t\t\t\t\t\n\t\t\t# Left boundary\n\t\t\tx = 0\n\t\t\tfor y in range(1,shape[0]):\n\t\t\t\ttmp_lat = (latitudes[y,x] + latitudes[y-1,x])/2\n\t\t\t\ttmp_lon = (longitudes[y,x] + longitudes[y-1,x])/2\n\t\t\t\tcorner_lats[y,x] = tmp_lat - (corner_lats[y,x+1] - tmp_lat)\n\t\t\t\tcorner_lons[y,x] = tmp_lon - (corner_lons[y,x+1] - tmp_lon)\n\n\n\t\t\t# Right boundary\n\t\t\tx = shape[1]\n\t\t\tfor y in range(1,shape[0]):\n\t\t\t\ttmp_lat = (latitudes[y,x-1] + latitudes[y-1,x-1])/2\n\t\t\t\ttmp_lon = (longitudes[y,x-1] + longitudes[y-1,x-1])/2\n\t\t\t\tcorner_lats[y,x] = tmp_lat - (corner_lats[y,x-1] - tmp_lat)\n\t\t\t\tcorner_lons[y,x] = tmp_lon - (corner_lons[y,x-1] - tmp_lon)\n\n\n\t\t\t# Bottom boundary\n\t\t\ty = 0\n\t\t\tfor x in range(1,shape[1]):\n\t\t\t\ttmp_lat = (latitudes[y,x] + latitudes[y,x-1])/2\n\t\t\t\ttmp_lon = (longitudes[y,x] + longitudes[y,x-1])/2\n\t\t\t\tcorner_lats[y,x] = tmp_lat - (corner_lats[y+1,x] - tmp_lat)\n\t\t\t\tcorner_lons[y,x] = tmp_lon - (corner_lons[y+1,x] - tmp_lon)\n\n\t\t\t# Top boundary\n\t\t\ty = shape[0]\n\t\t\tfor x in range(1,shape[1]):\n\t\t\t\ttmp_lat = (latitudes[y-1,x] + latitudes[y-1,x-1])/2\n\t\t\t\ttmp_lon = (longitudes[y-1,x] + longitudes[y-1,x-1])/2\n\t\t\t\tcorner_lats[y,x] = tmp_lat - (corner_lats[y-1,x] - tmp_lat)\n\t\t\t\tcorner_lons[y,x] = tmp_lon - (corner_lons[y-1,x] - 
tmp_lon)\n\t\t\t\n\t\t\t# Corners\n\t\t\tcorner_lats[0,0] = latitudes[0,0] - (corner_lats[1,1] - latitudes[0,0])\n\t\t\tcorner_lats[0,shape[1]] = latitudes[0,shape[1]-1] - (corner_lats[1,shape[1]-1] - latitudes[0,shape[1]-1])\n\t\t\tcorner_lats[shape[0],0] = latitudes[shape[0]-1,0] + (latitudes[shape[0]-1,0] - corner_lats[shape[0]-1,1])\n\t\t\tcorner_lats[shape[0],shape[1]] = latitudes[shape[0]-1,shape[1]-1] + (latitudes[shape[0]-1,shape[1]-1] - corner_lats[shape[0]-1,shape[1]-1])\n\n\t\t\tcorner_lons[0,0] = longitudes[0,0] - (corner_lons[1,1] - longitudes[0,0])\n\t\t\tcorner_lons[0,shape[1]] = longitudes[0,shape[1]-1] + (longitudes[0,shape[1]-1] - corner_lons[1,shape[1]-1])\n\t\t\tcorner_lons[shape[0],0] = longitudes[shape[0]-1,0] - (corner_lons[shape[0]-1,1] - longitudes[shape[0]-1,0])\n\t\t\tcorner_lons[shape[0],shape[1]] = longitudes[shape[0]-1,shape[1]-1] + (longitudes[shape[0]-1,shape[1]-1] - corner_lons[shape[0]-1,shape[1]-1])\n\n\n#\t\t\tprint corner_lats\n\n\t\t\t# Now create all polygons\n\t\t\tfor y in range(0, shape[0]):\n\t\t\t\tfor x in range(0, shape[1]):\n\n\t\t\t\t\t# Configure the slices\n\t\t\t\t\tslices[x_index] = slice(x,x+1)\n\t\t\t\t\tslices[y_index] = slice(y,y+1)\n\n\t\t\t\t\t# Check if we are masking and if this point is masked\n\t\t\t\t\tmasked = False\n\n\t\t\t\t\tif mask:\n\t\t\t\t\t\tif mask[y, x] < 0.5:\n\t\t\t\t\t\t\tmasked = True\n\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\tif not masked:\n\n\t\t\t\t\t\tvertices = []\n\t\t\t\t\t\tvertices.append([corner_lons[y, x], corner_lats[y,x]])\n\t\t\t\t\t\tvertices.append([corner_lons[y+1, x], corner_lats[y+1,x]])\n\t\t\t\t\t\tvertices.append([corner_lons[y+1, x+1], corner_lats[y+1,x+1]])\n\t\t\t\t\t\tvertices.append([corner_lons[y, x+1], corner_lats[y,x+1]])\n\t\t\t\t\t\tvertices.append([corner_lons[y, x], corner_lats[y,x]])\t\t\t\t\n\n\t\t\t\t\t\t# Create the basic feature\n\t\t\t\t\t\tfeature = {'type': 'Feature', 'properties':{'id':x + y * shape[1]}, 'geometry': {'type': 'Polygon', 'coordinates': [vertices]}}\n\t\t\t\t\t\t\n\t\t\t\t\t\t# Now add the data\t\t\t\t\t\n\t\t\t\t\t\t#data = self.variable[slices].flatten()\n\t\t\t\t\t\t\n\t\t\t\t\t\t# If we have property names then extract data for each name\n\t\t\t\t\t\tif propnames:\n\t\t\t\t\t\t\tfor name in propnames:\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tfeature['properties']['value'] = self.variable[slices].flatten()[1]\n\t#\t\t\t\t\t\t\tprint self.variable[slices]\n\t\t\t\t\t\t\t\t#feature['properties']['value'] = self.variable[slices].flatten()[propnames.index(name)]\n\t\t\t\t\t\t\n\t\t\t\t\t\t# else just set property 'value' to the first value of the flattened data array\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tpass\n\t\t\t\t\t\t\t\t#feature['properties']['value'] = float(self.variable[slices].flatten()[1])\n\t\t\t\t\t\t\n\t\t\t\t\t\t#print feature['properties']\n\t\t\t\t\t\t#, 'value':float(values[y,x])\n\t\t\t\t\t\tfeatures.append(feature)\n\t\t\t\t\t\n\t\t\tresult['features'] = features\n\t\t\t\t\t\t\n#\t\t\toutfile = open('test.json', 'w')\n#\t\t\toutfile.write(simplejson.dumps(result))\n#\t\t\toutfile.close()\n\t\t\t\n\t\t\n\t\t# Point type feature sets next\n\t\telif self.featuretype in ['Point', 'PointSeries']:\n\t\t\t\n\t\t\tresult = {'type': 'FeatureCollection', 'features':[]}\n\t\t\tfeatures = []\n\t\t\t\n\t\t\tlongitudes = self.longitudes\n\t\t\tlatitudes = self.latitudes\n\t\t\t\n\t\t\tcount = len(longitudes)\n\t\t\tfor fid in range(0,count):\n\t\t\t\tfeature = {'type':'Feature', 'properties':{'_id':fid}, 'geometry': {'type':'Point', 'coordinates': 
[float(longitudes[fid]), float(latitudes[fid])]}}\n\n\t\t\t\t# Add related variables to properties\n\t\t\t\tfor key in self.coordinates_mapping:\n\t\t\t\t\tif key in self.variable.group.variables and key not in ['latitude', 'longitude']:\n\t\t\t\t\t\tif self.coordinates_mapping[key]['map'] == self.coordinates_mapping['latitude']['map']:\n\t\t\t\t\t\t\tfeature['properties'][key] = self.variable.group.variables[key][fid]\n\t\t\t\t\t\t\t\n\t\t\t\tfeatures.append(feature)\n\t\t\t\t\n\t\t\tresult['features'] = features\n\n\t\t\t\n\t\telse:\n\t\t\treturn None\n\n\t\t# Cache result\n\t\tif not self._features:\n\t\t\tself._features = result\n\t\t\t\n\t\treturn result", "def convert_depth_frame_to_pointcloud(depth_image, camera_intrinsics ):\n\t\n\t[height, width] = depth_image.shape\n\n\tnx = np.linspace(0, width-1, width)\n\tny = np.linspace(0, height-1, height)\n\tu, v = np.meshgrid(nx, ny)\n\tx = (u.flatten() - camera_intrinsics.ppx)/camera_intrinsics.fx\n\ty = (v.flatten() - camera_intrinsics.ppy)/camera_intrinsics.fy\n\n\tz = depth_image.flatten() / 1000;\n\tx = np.multiply(x,z)\n\ty = np.multiply(y,z)\n\n\tx = x[np.nonzero(z)]\n\ty = y[np.nonzero(z)]\n\tz = z[np.nonzero(z)]\n\n\treturn x, y, z", "def pc_to_binvox(points, **kwargs):\n patch_size = kwargs.get(\"patch_size\", 40)\n percent_offset = kwargs.get(\"percent_offset\", (0.5, 0.5, 0.45))\n percent_patch_size = kwargs.get(\"percent_patch_size\", 0.8)\n\n if points.shape[1] != 3:\n raise Exception(\"Invalid pointcloud size, should be nx3, but is {}\".format(points.shape))\n\n if len(percent_offset) != 3:\n raise Exception(\"Percent offset should be a tuple of size 3, instead got {}\".format(percent_offset))\n\n percent_x, percent_y, percent_z = percent_offset\n\n # get the center of the pointcloud in meters. Ex: center = np.array([0.2, 0.1, 2.0])\n voxel_center = get_bbox_center(points)\n\n # get the size of an individual voxel. Ex: voxel_resolution=0.01 meaning 1cm^3 voxel\n # PERCENT_PATCH_SIZE determines how much extra padding to leave on the sides\n voxel_resolution = get_voxel_resolution(points, percent_patch_size * patch_size)\n\n # this tuple is where we want to stick the center of the pointcloud in our voxel grid\n # Ex: (20, 20, 18) leaving some extra room in the back half.\n pc_center_in_voxel_grid = (patch_size*percent_x, patch_size*percent_y, patch_size*percent_z)\n\n # create a voxel grid.\n vox_np = voxelize_points(\n points=points[:, 0:3],\n pc_bbox_center=voxel_center,\n voxel_resolution=voxel_resolution,\n num_voxels_per_dim=patch_size,\n pc_center_in_voxel_grid=pc_center_in_voxel_grid)\n\n # location in meters of the bottom corner of the voxel grid in world space\n offset = np.array(voxel_center) - np.array(pc_center_in_voxel_grid) * voxel_resolution\n\n # create a voxel grid object to contain the grid, shape, offset in the world, and grid resolution\n voxel_grid = binvox_rw.Voxels(vox_np, vox_np.shape, tuple(offset), voxel_resolution * patch_size, \"xyz\")\n\n # Where am I putting my point cloud relative to the center of my voxel grid\n # ex. 
(20, 20, 20) or (20, 20, 18)\n center_point_in_voxel_grid = (patch_size * percent_x, patch_size * percent_y, patch_size * percent_z)\n\n return voxel_grid, voxel_center, voxel_resolution, center_point_in_voxel_grid", "def getContents(self):\r\n cont=[]\r\n for i in range (len(self._indices)):\r\n cont.append(self._dataset.getPoint(self._indices[i]))\r\n return cont", "def point(self):\n return self.x, self.y, self.z", "def correct(point_cloud, axis_order=[0, 1, 2], dist_from_center=0):\n \n if type(axis_order) != list:\n axis_order = list(axis_order)\n\n # Adding the fixed distance to x and y.\n x = point_cloud[:, axis_order[0]] + dist_from_center\n y = point_cloud[:, axis_order[1]] + dist_from_center\n z = point_cloud[:, axis_order[2]]\n\n return np.vstack((x, y, z)).T", "def getPointValues(self, *args, **kwargs):\n ...", "def draw_clouds(a):\n small_cloud(0 + a, 0, 0)\n small_cloud(200 + a, -150, -10)\n big_cloud(350 + a, 0, -3)\n small_cloud(600 + a, -90, 3)\n small_cloud(800 + a, 0, 0)\n small_cloud(1000 + a, -150, -10)\n big_cloud(1150 + a, 0, -3)\n small_cloud(1400 + a, -90, 3)\n small_cloud(-800 + a, 0, 0)\n small_cloud(-600 + a, -150, -10)\n big_cloud(-450 + a, 0, -3)\n small_cloud(-200 + a, -90, 3)", "def create_points(self):\n v1 = 0.0\n v2 = 0.5\n v3 = 0.25\n v4 = 0.2 # only used for hexgrid\n\n points = []\n\n points.append((v1, v1, v1)) # 0\n points.append((v2, v1, v1)) # 1\n points.append((v2, v2, v1)) # 2\n points.append((v1, v2, v1)) # 3\n\n points.append((v1, v1, v2)) # 4\n points.append((v2, v1, v2)) # 5\n points.append((v2, v2, v2)) # 6\n points.append((v1, v2, v2)) # 7\n\n points.append((v3, v1, v1)) # 8\n points.append((v2, v3, v1)) # 9\n points.append((v3, v2, v1)) # 10\n points.append((v1, v3, v1)) # 11\n\n points.append((v1, v1, v3)) # 12\n points.append((v2, v1, v3)) # 13\n points.append((v2, v2, v3)) # 14\n points.append((v1, v2, v3)) # 15\n\n points.append((v3, v1, v2)) # 16\n points.append((v2, v3, v2)) # 17\n points.append((v3, v2, v2)) # 18\n points.append((v1, v3, v2)) # 19\n\n points.append((v4, v1, v1)) # 20\n points.append((v1, v4, v1)) # 21\n points.append((v1, v1, v4)) # 22\n\n return points", "def get_points(self):\r\n return self.nx*self.ny*self.nz", "def points(self) -> PointList:\n return self._points", "def get_clouds():\n clouds = [ x.get('cloud') for x in Schedconfig.objects.values('cloud').distinct() ]\n locale.setlocale(locale.LC_ALL, '')\n clouds = sorted(clouds, key=locale.strxfrm)\n return clouds", "def points(self):\r\n return self._structure.points", "def estimate_pose(self):\n # remove the outliers, keep the densest particles\n\n # compare all the possible distances between particles in our particlecloud\n\n distances = []\n i = 0\n for p1 in self.particlecloud.poses:\n i += 1\n for p2 in self.particlecloud.poses[i:]:\n distance = numpy.sqrt(((p1.position.x - p2.position.x)**2) \\\n + ((p1.position.y - p2.position.y)**2) \\\n + ((p1.position.z - p2.position.z)**2))\n distances.append(distance)\n\n # sort the distances and keep the first third of them\n min_dist = sorted(distances)[:int(round(len(distances) / 3))] # testing !! 
!!!!!!!!!!!!!!!\n # calculate each particle's number of appearances in the min_dist\n counter = numpy.zeros(len(self.particlecloud.poses))\n i = 0\n # increase the number of appearances depending on if the distance is included in the min_dist set\n for p1 in self.particlecloud.poses:\n i += 1\n j = i\n for p2 in self.particlecloud.poses[i:]:\n distance = numpy.sqrt(((p1.position.x - p2.position.x)**2) \\\n + ((p1.position.y - p2.position.y)**2) \\\n + ((p1.position.z - p2.position.z)**2))\n if distance in min_dist:\n counter[i - 1] += 1\n counter[j] += 1\n j += 1\n\n\n # sort counter and keep the particles corresponding to the last third\n sort_count = sorted(range(len(counter)), key=lambda k: counter[k])\n sort_count = sort_count[int(round(2 * len(sort_count) / 3)):]\n wanted_array=[]\n for i in sort_count:\n wanted_array.append(self.particlecloud.poses[i])\n est_pose = Pose()\n # find the mean position\n x_values = y_values = z_values = 0\n for p in wanted_array:\n x_values += p.position.x # means --> x_values = x_values + p.position.x\n y_values += p.position.y\n z_values += p.position.z\n\n\n meanX = x_values / len(wanted_array)\n meanY = y_values / len(wanted_array)\n meanZ = z_values / len(wanted_array)\n est_pose.position.x = meanX\n est_pose.position.y = meanY\n est_pose.position.z = meanZ\n\n # find the mean orientation\n x_values = y_values = z_values = w_values = 0\n for p in wanted_array:\n x_values += p.orientation.x\n y_values += p.orientation.y\n z_values += p.orientation.z\n w_values += p.orientation.w\n meanX = x_values / len(wanted_array)\n meanY = y_values / len(wanted_array)\n meanZ = z_values / len(wanted_array)\n meanW = w_values / len(wanted_array)\n est_pose.orientation.x = meanX\n est_pose.orientation.y = meanY\n est_pose.orientation.z = meanZ\n est_pose.orientation.w = meanW\n\n return est_pose", "def PCA (numpy_cloud ):\r\n\r\n # abort, if there are no points\r\n if (numpy_cloud.shape[0] == 0):\r\n #print (\"In normals.py, in PCA: The input array is empty. 
Returning a null vector and high sigma\")\r\n return np.array ((0, 0, 0)), 1.0, np.array ((0, 0, 0))\r\n\r\n # we only need three colums [X, Y, Z, I] -> [X, Y, Z]\r\n numpy_cloud = numpy_cloud[:, :3].copy () # copying takes roughly 0.000558 seconds per 1000 points\r\n cloud_size = numpy_cloud.shape[0]\r\n\r\n # get covariance matrix\r\n a_transposed_a, mass_center = build_covariance_matrix (numpy_cloud )\r\n\r\n # get normal vector and smallest eigenvalue\r\n normal_vector, smallest_eigenvalue = eigenvalue_decomposition (a_transposed_a )\r\n\r\n # the noise is based on the smallest eigenvalue and normalized by number of points in cloud\r\n noise = smallest_eigenvalue\r\n if (cloud_size <= 3 or noise < 1 * 10 ** -10):\r\n sigma = noise # no noise with 3 points\r\n else:\r\n sigma = sqrt(noise/(cloud_size - 3) )\r\n\r\n return normal_vector, sigma, mass_center", "def cf_to_points(ds: xr.Dataset):\n from shapely.geometry import MultiPoint, Point\n\n # Shorthand for convenience\n geo = ds.geometry_container.attrs\n\n # The features dimension name, defaults to the one of 'node_count' or the dimension of the coordinates, if present.\n feat_dim = None\n if \"coordinates\" in geo and feat_dim is None:\n xcoord_name, _ = geo[\"coordinates\"].split(\" \")\n (feat_dim,) = ds[xcoord_name].dims\n\n x_name, y_name = ds.geometry_container.attrs[\"node_coordinates\"].split(\" \")\n xy = np.stack([ds[x_name].values, ds[y_name].values], axis=-1)\n\n node_count_name = ds.geometry_container.attrs.get(\"node_count\")\n if node_count_name is None:\n # No node_count means all geometries are single points (node_count = 1)\n # And if we had no coordinates, then the dimension defaults to \"features\"\n feat_dim = feat_dim or \"features\"\n node_count = xr.DataArray([1] * xy.shape[0], dims=(feat_dim,))\n if feat_dim in ds.coords:\n node_count = node_count.assign_coords({feat_dim: ds[feat_dim]})\n else:\n node_count = ds[node_count_name]\n\n j = 0 # The index of the first node.\n geoms = np.empty(node_count.shape, dtype=object)\n # i is the feature index, n its number of nodes\n for i, n in enumerate(node_count.values):\n if n == 1:\n geoms[i] = Point(xy[j, :])\n else:\n geoms[i] = MultiPoint(xy[j : j + n, :])\n j += n\n\n return xr.DataArray(geoms, dims=node_count.dims, coords=node_count.coords)", "def point_cloud_to_volume(points, plb, vsize, radius=1.0):\r\n vol = np.zeros((vsize,vsize,vsize))\r\n vseg = np.zeros((vsize,vsize,vsize))\r\n voxel = 2*radius/float(vsize)\r\n locations = (points + radius)/voxel # shift all points to non-negative coordinates, assign to an occupancy grid\r\n locations = locations.astype(int)\r\n vol[locations[:,0],locations[:,1],locations[:,2]] = 1.0\r\n vseg[locations[:,0],locations[:,1],locations[:,2]] = plb\r\n return vol, vseg", "def spatial(self):", "def np2pcd(points, colors=None, normals=None):\n pc = o3d.geometry.PointCloud()\n pc.points = o3d.utility.Vector3dVector(points)\n if colors is not None:\n colors = np.array(colors)\n if colors.ndim == 2:\n assert len(colors) == len(points)\n elif colors.ndim == 1:\n colors = np.tile(colors, (len(points), 1))\n else:\n raise RuntimeError(colors.shape)\n pc.colors = o3d.utility.Vector3dVector(colors)\n if normals is not None:\n assert len(points) == len(normals)\n pc.normals = o3d.utility.Vector3dVector(normals)\n return pc", "def _base_points_to_voxelgrids(points, resolution, return_sparse=False):\n batch_size = points.shape[0]\n num_p = points.shape[1]\n\n device = points.device\n dtype = points.dtype\n\n vg_size = (batch_size, 
resolution, resolution, resolution)\n\n mult = torch.ones(batch_size, device=device, dtype=dtype) * (resolution - 1) # size of (batch_size)\n\n prefix_index = torch.arange(start=0, end=batch_size, device=device, dtype=torch.long).repeat(num_p, 1).T.reshape(-1, 1)\n\n pc_index = torch.round(((points) * mult.view(-1, 1, 1))).long()\n pc_index = torch.cat((prefix_index, pc_index.reshape(-1, 3)), dim=1)\n pc_index = torch.unique(pc_index, dim=0)\n\n # filter point that is outside of range 0 and resolution - 1\n condition = pc_index[:, 1:] <= (resolution - 1)\n condition = torch.logical_and(condition, pc_index[:, 1:] >= 0)\n row_cond = condition.all(1)\n\n pc_index = pc_index[row_cond, :]\n pc_index = pc_index.reshape(-1, 4)\n\n vg = torch.sparse.FloatTensor(\n pc_index.T,\n torch.ones(pc_index.shape[0], device=pc_index.device, dtype=dtype),\n vg_size\n )\n\n if not return_sparse:\n vg = vg.to_dense().to(dtype)\n\n return vg" ]
[ "0.8203496", "0.77815324", "0.7489397", "0.743419", "0.7415985", "0.7225747", "0.708484", "0.7042392", "0.6747202", "0.6592699", "0.6537746", "0.65085596", "0.6495195", "0.6450036", "0.64452165", "0.64442986", "0.64307445", "0.6306346", "0.62692606", "0.6268943", "0.6201976", "0.6176701", "0.6158885", "0.6157855", "0.615592", "0.6127091", "0.60804635", "0.60628915", "0.6061845", "0.6057568", "0.60516965", "0.59923756", "0.59839404", "0.5963975", "0.5956477", "0.59524846", "0.591164", "0.5902933", "0.5866572", "0.5849307", "0.5838114", "0.5815041", "0.58118975", "0.5799699", "0.57909197", "0.57666755", "0.57545865", "0.57527065", "0.57408684", "0.57284576", "0.5719414", "0.57099843", "0.5697688", "0.56968486", "0.56831384", "0.5681418", "0.56748796", "0.5652445", "0.56513196", "0.56477535", "0.5645504", "0.56300557", "0.562746", "0.5611293", "0.5611041", "0.56091654", "0.5601188", "0.5578207", "0.55722237", "0.55631775", "0.5561945", "0.5560558", "0.5554672", "0.55388594", "0.55174446", "0.55148727", "0.551361", "0.5511972", "0.55055153", "0.55011445", "0.5498913", "0.54947615", "0.5490517", "0.5489504", "0.548658", "0.5482217", "0.5476378", "0.54682124", "0.54674464", "0.5456326", "0.5446029", "0.54306024", "0.54295444", "0.5425137", "0.54247504", "0.5421983", "0.5419021", "0.5416698", "0.540744", "0.540191", "0.5401666" ]
0.0
-1
Python version of Mastering OpenCV with Practical Computer Vision Projects' LST implementation on page 144
def linear_ls_triangulation(self, point_a, cam_a, point_b, cam_b): # build A matrix # import pdb; pdb.set_trace() point_a = point_a.flatten() point_b = point_b.flatten() mat_a = np.matrix([ [point_a[0]*cam_a[2, 0]-cam_a[0, 0], point_a[0]*cam_a[2, 1]-cam_a[0, 1], point_a[0]*cam_a[2, 2]-cam_a[0, 2]], [point_a[1]*cam_a[2, 0]-cam_a[1, 0], point_a[1]*cam_a[2, 1]-cam_a[1, 1], point_a[1]*cam_a[2, 2]-cam_a[1, 2]], [point_b[0]*cam_b[2, 0]-cam_b[0, 0], point_b[0]*cam_b[2, 1]-cam_b[0, 1], point_b[0]*cam_b[2, 2]-cam_b[0, 2]], [point_b[1]*cam_b[2, 0]-cam_b[1, 0], point_b[1]*cam_b[2, 1]-cam_b[1, 1], point_b[1]*cam_b[2, 2]-cam_b[1, 2]] ]) # build B vector mat_b = np.matrix([ [-(point_a[0]*cam_a[2, 3]-cam_a[0, 3])], [-(point_a[1]*cam_a[2, 3]-cam_a[1, 3])], [-(point_b[0]*cam_b[2, 3]-cam_b[0, 3])], [-(point_b[1]*cam_b[2, 3]-cam_b[1, 3])] ]) # solve for X _, x = cv2.solve(mat_a, mat_b, None, cv2.DECOMP_SVD) return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_lk():\n\n # get current directory to work relative to current file path\n curdir = os.path.dirname(__file__)\n\n # Load configuration for system\n yaml_file = os.path.join(curdir, 'config.yaml')\n with open(yaml_file, \"r\") as f:\n config = yaml.load(f)\n\n # extract list of videos from data dir\n vid_dir = os.path.join(curdir, config['traindir'])\n vid_names = util.load_data(vid_dir)\n\n # extract background subtraction image from bg vid\n bg_file = os.path.join(curdir, config['bg_img'])\n bg_valid, bg_video, bg_frame = util.load_video(bg_file)\n\n valid, video, frame = util.load_video(vid_names[1])\n init_frame = frame[40: 680, 70: 1210]\n\n valid, next_frame = video.read()\n orig_next_frame = next_frame.copy()\n next_frame = next_frame[40: 680, 70: 1210]\n\n # rescale to gray\n if len(init_frame.shape) > 2:\n init_frame = cv2.cvtColor(init_frame, cv2.COLOR_BGR2GRAY)\n if len(next_frame.shape) > 2:\n next_frame = cv2.cvtColor(next_frame, cv2.COLOR_BGR2GRAY)\n\n _, mask = util.background_subtraction(init_frame, bg_frame, thresh=0.25)\n mask[:140, :] = 0\n mask[520:, :] = 0\n mask[:, 150: 220] = 0\n mask[:, :100] = 0\n mask[:, 1000:] = 0\n elem = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))\n dilated_mask = cv2.dilate(mask, kernel=elem)\n\n # lk = LK()\n # lk.process_frame(init_frame, next_frame, bg_file=bg_file)\n\n custom_lk = CustomLK()\n\n writer = cv2.VideoWriter('output.avi', -1, 20, (1140, 640))\n\n frame_num = 1\n\n while valid:\n\n print(\"Frame:\", frame_num)\n\n u, v, img, next_frame = custom_lk.hierarchical_lk(img_a=init_frame,\n img_b=next_frame,\n orig_b=orig_next_frame,\n levels=5,\n k_size=8,\n k_type=\"uniform\",\n sigma=0,\n interpolation=cv2.INTER_CUBIC,\n border_mode=cv2.BORDER_REPLICATE,\n mask=dilated_mask)\n\n cv2.imshow('img.png', img)\n cv2.waitKey(10)\n\n # writer.write(img)\n\n init_frame = next_frame.copy()\n valid, next_frame = video.read()\n orig_next_frame = next_frame.copy()\n next_frame = next_frame[40: 680, 70: 1210]\n next_frame = cv2.cvtColor(next_frame, cv2.COLOR_BGR2GRAY)\n\n frame_num += 1\n\n writer.release()", "def run(self):\n cap = cv2.VideoCapture(0)\n\n while True:\n ret, frame = cap.read()\n try:\n name = self.recog(frame)\n boxes, probs, landmarks = mtcnn.detect(frame, landmarks=True)\n if self.last_box is not None:\n # print('last_box: ', self.last_box)\n cx_0, cy_0 = (self.last_box[0][0] + self.last_box[0][2]) // 2, (self.last_box[0][1] + self.last_box[0][3]) // 2\n cx_1, cy_1 = (boxes[0][0] + boxes[0][2]) // 2, (boxes[0][1] + boxes[0][3]) // 2\n w_0, h_0 = self.last_box[0][2] - self.last_box[0][0], self.last_box[0][3] - self.last_box[0][1]\n w_1, h_1 = boxes[0][2] - boxes[0][0], boxes[0][3] - boxes[0][1]\n\n factor_center = 0.3\n new_cx = cx_0 + factor_center * (cx_1 - cx_0)\n new_cy = cy_0 + factor_center * (cy_1 - cy_0)\n\n factor_hw = 0.3\n new_w = w_0 + factor_hw * (w_1 - w_0)\n new_h = h_0 + factor_hw * (h_1 - h_0)\n\n boxes = [[int(new_cx - new_w // 2), int(new_cy - new_h // 2),\n int(new_cx + new_w // 2), int(new_cy + new_h // 2)]]\n\n self.last_box = boxes\n\n # draw on frame\n self._draw(frame, boxes, probs, landmarks, name)\n print(name)\n # draw on frame\n\n except:\n pass\n\n # Show the frame\n cv2.imshow('Face Detection', frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()", "def detection_cam(network_path, xml_path):\n\n files = os.listdir(network_path)\n\n networks = [load_network(network_path + files[k]) for k in range(len(files))]\n\n cap = 
cv2.VideoCapture(0)\n\n known_images = load_vector_database(\"P:/coding_weeks/machine_learning/repo/database/training_database.vdb\")\n\n known_labels = []\n\n for label in known_images:\n known_labels.append(label)\n\n while True:\n # Capture image par image\n ret, frame = cap.read()\n\n rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n boxes, faces = face_detection(rgb, xml_path)\n\n names = []\n\n for face in faces:\n face = cv2.resize(face, (128, 128))\n face = cv2.cvtColor(face, cv2.COLOR_RGB2GRAY)\n vector_list = hog(face, orientations=8, pixels_per_cell=(8, 8), cells_per_block=(1, 1))\n\n vector = numpy.zeros((len(vector_list), 1))\n\n for k in range(len(vector_list)):\n vector[k, 0] = vector_list[k]\n\n # guess = network.forward_propagation(vector)\n #\n # max_index = 0\n # max_value = guess[0, 0]\n #\n # for k in range(len(known_labels)):\n # if guess[k, 0] > max_value:\n # max_index = k\n # max_value = guess[k, 0]\n #\n # if max_value < 0.3:\n # names.append(\"UNKNOWN\" + str(max_value))\n #\n # else:\n # names.append(known_labels[max_index] + str(max_value))\n #\n # print(\"GUESS {} | TRUSTED {}\".format(known_labels[max_index], str(100.0 * max_value)[:5]))\n\n labels = []\n\n for network in networks:\n guess = network.forward_propagation(vector)\n\n max_index = 0\n max_value = guess[0, 0]\n\n for k in range(len(known_labels)):\n if guess[k, 0] > max_value:\n max_index = k\n max_value = guess[k, 0]\n\n labels.append(known_labels[max_index])\n\n labels.sort()\n\n d = {}\n\n for label in labels:\n if label not in d:\n d[label] = 1\n else:\n d[label] += 1\n\n max = 0\n label = \"\"\n\n for l in d:\n if d[l] > max:\n max = d[l]\n label = l\n\n if max >= 0.8 * len(files):\n names.append(label)\n else:\n names.append(\"UNKNOWN\")\n\n for ((x_beginning, y_beginning, face_width, face_height), name) in zip(boxes, names):\n cv2.rectangle(frame, (x_beginning, y_beginning), (x_beginning + face_width, y_beginning + face_height), (0, 255, 0), 2)\n\n cv2.putText(frame, name, (x_beginning, y_beginning), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)\n\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()", "def yolo_detection(raw_image):\n class_ids = []\n confidences = []\n boxes = []\n height , width ,c= raw_image.shape\n blob = cv2.dnn.blobFromImage(raw_image, 0.00392, (416,416), (0,0,0), True, crop=False)\n net.setInput(blob)\n outs = net.forward(output_layers)\n\n for out in outs:\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n if confidence > 0.4:\n center_x = int(detection[0]*width)\n center_y = int(detection[1]*height)\n w = int(detection[2]*width)\n h = int(detection[3]*height)\n ##Rectangle Draw\n topleft_x = int(center_x-(w/2))\n topleft_y = int(center_y-(h/2))\n\n boxes.append([topleft_x,topleft_y,w,h])\n confidences.append(float(confidence))\n class_ids.append(class_id)\n indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)\n #DISPLAY DETECTION\n total_detections = len(boxes)\n for i in range(total_detections):\n if i in indexes:\n topleft_x, topleft_y, w,h = boxes[i]\n label = detection_classes[class_ids[i]]\n cv2.rectangle(raw_image, (topleft_x,topleft_y), (topleft_x+w,topleft_y+h), (0,100,255), 1)\n cv2.putText(raw_image, label, (topleft_x, topleft_y),cv2.FONT_HERSHEY_COMPLEX,1,(0,165,255))\n\n\n return raw_image", "def demo(net, image_name):\n # Load the demo image\n im_file = os.path.join(im_path, image_name)\n timer = Timer()\n timer.tic()\n 
im = cv2.imread(im_file)\n timer.toc()\n print ('reading image took {:.3f}s for detection').format(timer.total_time)\n crop_size=6000 #裁减图像大小\n crop_overlap=100 #裁减图像的重叠区域\n # ipdb.set_trace()\n if im.shape[0]>crop_size and im.shape[1]>crop_size:\n index=crop_im(crop_size,crop_overlap,im)\n all_dets=[[]for _ in xrange(2)] \n #print index\n for im_index in range(0,len(index)): \n start_x=index[im_index][0][0]\n start_y=index[im_index][0][1]\n end_x=index[im_index][0][2]\n end_y=index[im_index][0][3] \n scores, boxes = im_detect(net, im[start_x:end_x,start_y:end_y])\n \n # skip j = 0, because it's the background class\n for class_index in xrange(1, 2):\n inds = np.where(scores[:, class_index] > CONF_THRESH[class_index-1])[0] #confidence thresh\n if len(inds)==0:\n continue\n # from ipdb import set_trace\n # set_trace() \n cls_scores = scores[inds, class_index]\n #cls_boxes = boxes[inds, class_index * 4:(class_index + 1) * 4]\n cls_boxes = boxes[inds, 4:8]\n #from ipdb import set_trace\n #set_trace() \n ###函数im_detect的输出是什么样的?这里为啥要乘上4???????????\n cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \\\n .astype(np.float32, copy=False)\n #后处理函数\n #cls_dets=postprocess(cls_dets,del_theta) \n #softnms,如果不使用该方法可以注释掉,这个是faster自带的softnms,但是\n #它是将所有类不加区分放在一起进行softnms,而且所有的类共用一个置信概率 \n #keep = soft_nms(cls_dets, sigma=0.5, Nt=0.3, threshold=0.001, method=2)\n #2是高斯,1是线性,设其他是nms\n #nms,如果不使用该方法也注释掉,它和soft_nms二选一\n #from ipdb import set_trace\n #set_trace() \n #keep = nms(cls_dets, NMS_THRESH[class_index-1]) #nms thresh\n #cls_dets = cls_dets[keep, :]\n ##index的每一行的结构((start_x,start_y,end_x,end_y),h_num*(j-1)+k)\n cls_dets[:,:1]=(cls_dets[:,:1]+index[im_index][0][1])\n cls_dets[:,1:2]=(cls_dets[:,1:2]+index[im_index][0][0])\n cls_dets[:,2:3]=(cls_dets[:,2:3]+index[im_index][0][1])\n cls_dets[:,3:4]=(cls_dets[:,3:4]+index[im_index][0][0])\n all_dets[class_index].append(cls_dets.tolist())\n \n # from ipdb import set_trace\n # set_trace() \n for j in xrange(1, 2):\n if len(all_dets[j])==0:\n continue\n whole_dets=np.vstack(all_dets[j])\n \n \n ##后处理1\n # keep2=postprocess(whole_dets,del_theta,del_theta_p)#1111111111111\n \n \n #keep = soft_nms(whole_dets, sigma=0.5, Nt=0.3, method=2, threshold=0.001) \n ##后处理2,一般NMS,上面用的是soft-NMS\n whole_dets=whole_dets.astype(np.float32, copy=False)\n keep = nms(whole_dets, NMS_THRESH[class_index-1]) #111111111111\n #whole_dets=all_dets_pos[keep]#11111111111111111\n ##后处理3\n # whole_dets1=all_dets_pos[keep]\n # ind=postprocess2(whole_dets1,del_theta2[j-1])\n whole_dets=whole_dets[keep] \n \n ##把最终结果按得分排序,不需要所以注释掉\n # a_arg=np.argsort(-whole_dets[:,4])\n # whole_dets=whole_dets[a_arg] #rank\n\n if os.path.exists(result_path):\n pass\n else:\n os.mkdir(result_path)\n file1=open(result_path+'det_test_'+CLASSES[j]+'.txt','a')\n for i in range(whole_dets.shape[0]):\n bbox = tuple(int(np.round(x)) for x in whole_dets[i, :4])\n score = whole_dets[i, -1]\n \n ##画图\n if score>0.5:\n cv2.rectangle(im, bbox[0:2], bbox[2:4], (0, 204, 0), 2)\n cv2.putText(im, '%s: %.3f' % (CLASSES[j], score), (bbox[0], bbox[1] + 15), cv2.FONT_HERSHEY_PLAIN,\n 1.0, (0, 0, 255), thickness=1)\n\n # if image_name.find('.tiff') == -1: # this img is png or tif\n # im_name=image_name[:-4]\n # else: #this img is tiff\n # im_name=image_name[:-5] \n line=image_name+' '+str(score)+' '+str(bbox[0])+' '+str(bbox[1])+' '+str(bbox[2])+' '+str(bbox[3])+'\\n'\n file1.write(line)\n\t\t\t\t#file1.write(line)\n file1.close()\n else:\n scores, boxes = im_detect(net, im)\n # from ipdb import set_trace\n # 
set_trace() \n for class_index in xrange(1, 2):\n #print(class_index)\n inds = np.where(scores[:, class_index] > CONF_THRESH[class_index-1])[0] #confidence thresh\n if len(inds)==0:\n continue\n #############################\n #print(inds)\n ###############################\n cls_scores = scores[inds, class_index]\n cls_boxes = boxes[inds, 4:8]\n cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \\\n .astype(np.float32, copy=False)\n # keep2=postprocess(cls_dets,del_theta,del_theta_p)\n # all_dets_pos=cls_dets[keep2]\n #keep = soft_nms(cls_dets, sigma=0.5, Nt=0.3, method=2, threshold=0.001) \n keep = nms(cls_dets, NMS_THRESH[class_index-1]) #nms thresh\n cls_dets = cls_dets[keep]\n \n # ind=postprocess2(cls_dets,del_theta2[class_index-1])\n # cls_dets=cls_dets[ind]\n # a_arg=np.argsort(-cls_dets[:,4])\n # cls_dets=cls_dets[a_arg]\n\n if os.path.exists(result_path):\n pass\n else:\n os.mkdir(result_path)\n \n file1=open(result_path+'det_test_'+CLASSES[class_index]+'.txt','a')\n for i in range(cls_dets.shape[0]):\n bbox = tuple(int(np.round(x)) for x in cls_dets[i, :4])\n score = cls_dets[i, -1]\n if score>0.5:\n cv2.rectangle(im, bbox[0:2], bbox[2:4], (0, 204, 0), 2)\n cv2.putText(im, '%s: %.3f' % (CLASSES[class_index], score), (bbox[0], bbox[1] + 15), cv2.FONT_HERSHEY_PLAIN,\n 1.0, (0, 0, 255), thickness=1)\n # if image_name.find('.tiff') == -1: # this img is png or tif\n # im_name=image_name[:-4]\n # else: #this img is tiff\n # im_name=image_name[:-5] \n \n line=im_name+' '+str(score)+' '+str(bbox[0])+' '+str(bbox[1])+' '+str(bbox[2])+' '+str(bbox[3])+'\\n'\n file1.write(line)\n file1.close()\n \n \n if os.path.exists(save_path):\n pass\n else:\n os.mkdir(save_path) \n cv2.imwrite(os.path.join(save_path+'/'+image_name),im)", "def __init__(self):\n self.lk_params = dict( winSize = (15,15),\\\n maxLevel = 2,\\\n criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.1))\n\n self.prev = np.empty((0,0), dtype = np.int8)", "def main():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-hgt\", \"--imgHeight\", help=\"The height of the images, default=720.\",\n type=int, default=720)\n\n parser.add_argument(\"-wd\", \"--imgWidth\", help=\"The width of the images, default=1280.\",\n type=int, default=1280)\n\n parser.add_argument(\"-r\", \"--chessboardRows\", help=\"The rows of the chessboard calibration images, default=6.\",\n type=int, default=6)\n\n parser.add_argument(\"-c\", \"--chessboardCols\", help=\"The cols of the chessboard calibration images, default=9.\",\n type=int, default=9)\n\n parser.add_argument(\"-cp\", \"--calibrationPath\", help=\"The height of the images, default=720.\",\n type=str, default='')\n\n parser.add_argument(\"-in\", \"--inputVideoPath\", help=\"The path to the input video to be processed.\",\n type=str, default='')\n\n parser.add_argument(\"-out\", \"--outputVideoPath\", help=\"The path to the where to store output video.\",\n type=str, default='')\n\n args = parser.parse_args()\n\n print(args)\n\n assert args.calibrationPath != '', \"The path to calibration images can't be empty\"\n assert args.inputVideoPath != '', \"The path to input video can't be empty\"\n assert args.outputVideoPath != '', \"The path to output video can't be empty\"\n\n camera_mtx, dist_coeff = CameraCalibration((args.imgHeight, args.imgWidth),\n (args.chessboardRows, args.chessboardCols),\n args.calibrationPath).calibrate()\n print(\"Camera Mtx\", camera_mtx)\n print(\"Distortion Coefficient\", dist_coeff)\n # img = cv2.imread('test_images/test5.jpg')\n 
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n AdvancedLaneDetection(args.inputVideoPath, camera_mtx, dist_coeff).process_video(args.outputVideoPath)\n\n # cv2.imwrite(\"output.jpg\", result)", "def demo(net, image_name, classes):\n\n # Load pre-computed Selected Search object proposals\n # box_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo',image_name + '_boxes.mat')\n test_mats_path = '/home/tanshen/fast-rcnn/data/kaggle/test_bbox'\n box_file = os.path.join(test_mats_path ,image_name + '_boxes.mat')\n obj_proposals = sio.loadmat(box_file)['boxes']\n\n # Load the demo image\n test_images_path = '/home/tanshen/fast-rcnn/data/kaggle/ImagesTest'\n # im_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo', image_name + '.jpg')\n im_file = os.path.join(test_images_path, image_name + '.jpg')\n im = cv2.imread(im_file)\n\n # Detect all object classes and regress object bounds\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(net, im, obj_proposals)\n timer.toc()\n # print ('Detection took {:.3f}s for '\n # '{:d} object proposals').format(timer.total_time, boxes.shape[0])\n\n # Visualize detections for each class\n CONF_THRESH = 0\n NMS_THRESH = 0.3\n max_inds = 0\n max_score = 0.0\n for cls in classes:\n cls_ind = CLASSES.index(cls)\n cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n keep = np.where(cls_scores >= CONF_THRESH)[0]\n cls_boxes = cls_boxes[keep, :]\n cls_scores = cls_scores[keep]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n # print 'All {} detections with p({} | box) >= {:.1f} in {}'.format(cls, cls,\n # CONF_THRESH, image_name)\n #if get_max!=[]: \n\n [ind,tmp]=get_max(im, cls, dets, thresh=CONF_THRESH)\n #print image_name,cls,tmp\n\n #vis_detections(im, cls, dets, image_name, thresh=CONF_THRESH)\n #print dets[:,-1]\n #print image_name,max_score\n file.writelines([image_name,'\\t',cls,'\\t',str(tmp),'\\n'])\n if(max_score<tmp):\n max_score=tmp\n cls_max=cls\n print image_name,cls_max,max_score", "def do_stuff(self, net, meta):\n cv2_img = self.img_to_cv2(self.last_img)\n # Now we can use cv2 functions as the image is <type 'numpy.ndarray'>\n # rospy.loginfo(\"cv2_img: \" + str(type(cv2_img)))\n # Your OpenCV stuff\n # cv2_img = cv2.resize(cv2_img, (0,0), fx=0.25, fy=0.25) \n\n (rows,cols,channels) = cv2_img.shape\n # if cols > 60 and rows > 60 :\n # cv2.circle(cv2_img, (50,50), 10, 255)\n \n global x_old\n global no_meas_counter\n global est\n global cor\n global w\n global h\n \n\n r = darknet.detect(net, meta, cv2_img)\n # print(r)\n\n if not r:\n no_meas_counter += 1\n\n for i in r:\n if i[0].decode() == \"person\":\n x, y, w, h = i[2][0], i[2][1], i[2][2], i[2][3]\n xmin, ymin, xmax, ymax = darknet.convertBack(float(x), float(y), float(w), float(h))\n pt1 = (xmin, ymin)\n pt2 = (xmax, ymax)\n cv2.rectangle(cv2_img, pt1, pt2, (0, 255, 0), 2)\n cv2.putText(cv2_img, i[0].decode() + \" [\" + str(round(i[1] * 100, 2)) + \"]\", (pt1[0], pt1[1] + 20), cv2.FONT_HERSHEY_SIMPLEX, 1, [0, 255, 0], 4)\n \n global mp\n mp = np.array([[np.float32(x)],[np.float32(y)]])\n cor = kalman.correct(mp)\n no_meas_counter = 0\n\t\t\n\n else:\n no_meas_counter += 1\n \n # x_old = x\n\n # cv2.imshow(\"cv2_img\", cv2_img)\n # k = cv2.waitKey(1)\n # if k == 27:\n # cv2.destroyAllWindows()\n # exit()\n\n if no_meas_counter < 30:\n est = kalman.predict()\n msg = PolygonStamped()\n msg.header.stamp = rospy.Time.now()\n # msg.polygon.points = [Point32(x=x, y=y), Point32(x=cols, 
y=rows), Point32(x=w, y=h)]\n msg.polygon.points = [Point32(x=est[0], y=est[1]), Point32(x=cols, y=rows), Point32(x=w, y=h)] \n self.pub_yolo_detection.publish(msg)\n\n # cv2.imshow(\"Image window\", cv2_img)\n # cv2.waitKey(3)\n\n self.pub_images(cv2_img)\n self.is_new_img = False", "def predict_again(src):\n global rcnt\n global lcnt\n H,W = src.shape[:2]\n #cv2.imshow(\"cROPPPPPPED\",src)\n #print (src.shape)\n\n img1 = src[:,:int(W/2)]\n img2 = src[:,int(W/2)+1:]\n contoured1,area1 = drawContours11111(img1)\n contoured2,area2 = drawContours11111(img2)\n #cv2.imshow(\"blank_image\",contoured1)\n #cv2.imshow(\"blank_image1\",contoured2)\n print (area1,area2)\n if area2>area1:\n #print (\"New:::::::::::::RIGGGGGGGGGHT\")\n if rcnt >=3:\n print (\"New:::::::::::::RIGGGGGGGGGHT\")\n feedback.direction = 1\n feedback.detection = 1\n rcnt += 1\n lcnt = 0\n elif area1>area2:\n #print (\"New:::::::::::::LEFTTTTTTTTT\")\n if lcnt >=3:\n print (\"New:::::::::::::LEFTTTTTTTTT\")\n feedback.direction = -1\n feedback.detection = 1\n lcnt += 1\n rcnt = 0", "def main():\n \n #\n # Initialization\n #\n ref_time = time.time()\n output_string = '' \n cv2.namedWindow('frame', cv2.WINDOW_GUI_NORMAL+cv2.WINDOW_AUTOSIZE)\n \n #\n # Open the capture device and print some\n # useful properties\n #\n cap = cv2.VideoCapture(0)\n if cap.isOpened():\n #cap.set(cv.CV_CAP_PROP_FRAME_WIDTH, 320)\n #cap.set(cv.CV_CAP_PROP_FRAME_HEIGHT, 240)\n \n frameWidth = cap.get(cv.CV_CAP_PROP_FRAME_WIDTH)\n frameHeight = cap.get(cv.CV_CAP_PROP_FRAME_HEIGHT)\n \n print 'frame: width {}, height {}'.format(frameWidth, frameHeight)\n\n #\n # Parameters for Lucas-Kanade optical flow\n #\n lk_params = dict( winSize = (15,15),\n maxLevel = 2,\n criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n\n #\n # Predefine points to track\n #\n track_points = np.array([[[220.0, 120.0]],\n [[220.0, 200.0]],\n [[220.0, 280.0]],\n [[220.0, 360.0]],\n [[420.0, 120.0]],\n [[420.0, 200.0]],\n [[420.0, 280.0]],\n [[420.0, 360.0]]], 'float32')\n \n #\n # Take first frame and find corners in it\n #\n cap_ok, frame = cap.read()\n if not cap_ok:\n sys.exit()\n\n prev_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n print 'rel_time,p0dx,p0dy,p1dx,p1dy,p2dx,p2dy,p3dx,p3dy,p4dx,p4dy,p5dx,p5dy,p6dx,p6dy,p7dx,p7dy'\n\n while(True):\n\n cap_ok, frame = cap.read()\n if not cap_ok:\n break\n \n curr_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n #\n # Calculate optical flow\n #\n next_points, st, err = cv2.calcOpticalFlowPyrLK(prev_frame, curr_frame, track_points, None, **lk_params)\n\n #\n # Iterate through points and display on video frame\n # as well as output a CSV formated value list\n #\n for point_index in range(0, track_points.shape[0]):\n \n #\n # Display results on video frame\n #\n track_point = np.int0(track_points[point_index])\n x0,y0 = track_point.ravel()\n cv2.circle(frame, (x0,y0), 5, (0,255,0), -1)\n\n next_point = np.int0(next_points[point_index])\n x1,y1 = next_point.ravel()\n cv2.circle(frame, (x1,y1), 5, (0,0,255), -1)\n\n #\n # Build CSV string\n #\n output_string += ',{:.2f},{:.2f}'.format(x0-x1, y0-y1)\n \n #\n # Print out some data in a CSV format for graphing\n #\n now = time.time() - ref_time \n print '{:.2f}{}'.format(now, output_string)\n output_string = ''\n\n #\n # Display result and check for escape key\n #\n cv2.imshow('frame',frame)\n k = cv2.waitKey(1) & 0xff\n if k == 27:\n break\n\n #\n # Now update the previous frame and previous points\n #\n prev_frame = curr_frame.copy()\n\n 
cv2.destroyAllWindows()\n cap.release()", "def get_light_state(self):\n\n cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, \"bgr8\")\n\n #Get classification\n tl_image_rgb, color_index = self.light_classifier.get_classification(cv_image)\n tl_cv_image = cv2.cvtColor(tl_image_rgb, cv2.COLOR_RGB2BGR)\n try:\n self.tl_detected_image_pub.publish(self.bridge.cv2_to_imgmsg(tl_cv_image, \"bgr8\"))\n except CvBridgeError as e:\n print(e)", "def test(one=True, training=True, detector_debug=False):\n total = 0\n imgs = []\n if one:\n\n print(\"Detecting:\")\n file_sign = \"./Data/Reglamentarias/STC-RG-3.jpg\"\n sign = cv2.imread(file_sign, 1)\n d = Detector(sign, show=True, debug=detector_debug)\n s, th = d.detect()\n seg = Segmenter(s)\n\n seg.keypoints()\n seg.descriptors()\n res = np.concatenate((seg.origi, seg.th, seg.img, seg.kpimg), axis=1)\n cv2.imshow(\"res\", res)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n if training:\n print (\"Training\")\n for imagePath in paths.list_images(\"./training/PV/\"):\n print (imagePath)\n sign = cv2.imread(imagePath, 1)\n\n seg = Segmenter(sign)\n seg.watershed()\n seg.keypoints()\n res = np.concatenate((seg.origi, seg.th, seg.img, seg.kpimg), axis=1)\n cv2.imshow(\"res\", res)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n else:\n if not one:\n for i in range(1, 90):\n # file = \"./Data/Preventivas/STC-PV-\"+str(i)+\".jpg\"\n file_sign = \"./Data/Reglamentarias/STC-RG-\" + str(i) + \".jpg\"\n # file = \"./Data/Mixtas/STC-MX-\"+ str(i) +\".jpg\"\n sign = cv2.imread(file_sign, 1)\n d = Detector(sign, show=False)\n s, th = d.detect()\n if s is not None:\n total += 1\n imgs.append((i, s, th))\n\n print (\"Detected:\", str(total))\n\n for i in range(1, len(imgs)-1):\n seg = Segmenter(imgs[i][1])\n seg.watershed()\n seg.keypoints()\n res = np.concatenate((seg.origi, seg.th, seg.img, seg.kpimg), axis=1)\n cv2.imshow(\"img\"+str(imgs[i][0]), res)\n print (str(imgs[i][0]))\n\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def laser_detector(out_l_x, out_l_y):\n global screen_x_long\n global screen_y_long\n while True:\n ret, frame2 = cap.read()\n time.sleep(0.5)\n crop_img2 = frame2[SCREEN_Y_TOP:SCREEN_Y_BOT, SCREEN_X_TOP:SCREEN_X_BOT]\n hsv_image2 = cv2.cvtColor(crop_img2, cv2.COLOR_BGR2HSV)\n laser(hsv_image2)\n laser_str_el = cv2.getStructuringElement(cv2.MORPH_RECT, (8, 8))\n laser_str_el_2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))\n laser_close_morphed = cv2.morphologyEx(channels['laser'],\n cv2.MORPH_CLOSE,\n laser_str_el\n )\n laser_morphed = cv2.morphologyEx(laser_close_morphed,\n cv2.MORPH_OPEN,\n laser_str_el_2\n )\n\n blur = cv2.GaussianBlur(laser_morphed, (7, 7), 4, 4)\n\n lasers = cv2.HoughCircles(blur, cv.CV_HOUGH_GRADIENT, 2.5, 720 / 2,\n param1=10, param2=4, minRadius=4,\n maxRadius=10\n )\n if lasers is not None:\n lasers = np.uint16(np.around(lasers))\n for i in lasers[0, :]:\n print \"lasers!\"\n # draw the outer circle\n cv2.circle(crop_img, (i[0], i[1]), i[2], (0, 255, 0), 2)\n # draw the center of the circle\n cv2.circle(crop_img, (i[0], i[1]), 2, (0, 0, 255), 3)\n x_l = ((i[0]) / screen_x_long) * WIDTH\n y_l = HEIGHT - (((i[1]) / screen_y_long) * HEIGHT)\n if laserT:\n out_l_x.put(x_l)\n out_l_y.put(y_l)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n print \"Exiting Background Thread: Laser detector\"", "def find_initial_position(img1, img2):\n # find points of interest in points\n img1_kp, img1_des = compute_orb(img1)\n img2_kp, img2_des = compute_orb(img2)\n\n # get closest 2 matches per point\n bf = 
cv2.BFMatcher(normType=cv2.NORM_HAMMING)\n matches = bf.knnMatch(img1_des, img2_des, k=2)\n\n good_matches = []\n pts1 = []\n pts2 = []\n # Lowe's ratio test\n for m, n in matches:\n if m.distance < 0.75*n.distance:\n good_matches.append(m)\n pts1.append(img1_kp[m.queryIdx].pt)\n pts2.append(img2_kp[m.trainIdx].pt)\n\n pts1 = np.float32(pts1)\n pts2 = np.float32(pts2)\n\n # essential matrix gives the motion of the points\n # to get motion of the camera, flip the inputs between pts1 and pts2\n essential_matrix, e_mask = cv2.findEssentialMat(pts2, pts1, intrinsic_camera_matrix)\n\n # select only inlier points as per the RANSAC method\n pts1 = pts1[e_mask.ravel() == 1]\n pts2 = pts2[e_mask.ravel() == 1]\n\n _, rotation, translation, mask, triangulated_points = cv2.recoverPose(essential_matrix, pts2, pts1, intrinsic_camera_matrix, distanceThresh=50)\n triangulated_points = np.asarray([np.divide(triangulated_points[0], triangulated_points[3]),\n np.divide(triangulated_points[1], triangulated_points[3]),\n np.divide(triangulated_points[2], triangulated_points[3])]).transpose()\n\n CAMERA_POSES.clear()\n CAMERA_POSES.append(np.hstack((np.identity(3), np.array([[0], [0], [0]]))))\n CAMERA_POSES.append(np.hstack((rotation, translation)))\n return rotation, translation, triangulated_points", "def detect_and_visualize(self, imgname, root_dir=None, extension=None,\n classes=[], thresh=0.6, show_timer=False):\n if imgname.endswith(\".png\") or imgname.endswith(\".jpg\"):\n img = cv2.imread(imgname)\n # dets, seg = self.im_detect(imgname, root_dir, extension, show_timer=show_timer)\n dets, seg = self.im_detect_single(img, show_timer=show_timer)\n det = dets[0]\n if self.data_shape[1]==320:\n img = cv2.resize(img, (640, 320))\n img[:, :, (0, 1, 2)] = img[:, :, (2, 1, 0)]\n # idx = nms.nms(np.hstack((det[:,2:6],det[:,1:2])),.9)\n # det=det[idx,:]\n print(det[:5,:])\n self.visualize_detection(img, det, seg, classes, thresh)\n cv2.waitKey()\n elif imgname.endswith(\".mp4\") or imgname.endswith(\".avi\") or imgname.isdigit():\n cap = cv2.VideoCapture(int(imgname) if imgname.isdigit() else imgname)\n while 1:\n tic = time.time()\n _, img = cap.read()\n if img is None: break\n img, im_scale = resize(img, 600, 1024)\n if math.fabs(float(img.shape[1])/float(img.shape[0])-2.)>.01:\n # img = img[32:512+32,:,:]\n img = img[32+32:512+64,:,:]\n if self.data_shape[1]==320:\n img = cv2.resize(img, (640, 320))\n tic0 = time.time()\n dets = self.im_detect_single(img, show_timer=True)\n toc0 = time.time()\n det = dets[0]\n img[:, :, (0, 1, 2)] = img[:, :, (2, 1, 0)]\n idx = nms.nms(np.hstack((det[:,2:6],det[:,1:2])),.95)\n det=det[idx,:]\n self.visualize_detection(img, det, None, classes, thresh)\n toc = time.time()\n # print(\"%.1ffps, %.1fms, %.1fms\"%(1./(toc-tic),(toc0-tic0)*1000.,(toc-tic)*1000.,))\n if cv2.waitKey(1)&0xff==27:\n break\n else:\n raise IOError(\"unknown file extention, only .png/.jpg/.mp4/.avi files are supported.\")", "def up_to_step_4(imgs):\n # ... 
your code here ...\n for i in range(len(imgs)-1):\n \n detector = cv2.xfeatures2d.SURF_create(hessianThreshold = 3000,\n nOctaves = 4,\n nOctaveLayers = 3,\n upright = False,\n extended = False)\n gray1= cv2.cvtColor(imgs[i],cv2.COLOR_BGR2GRAY)\n kp1,des1 = detector.detectAndCompute(gray1,None)\n gray2= cv2.cvtColor(imgs[i+1],cv2.COLOR_BGR2GRAY)\n kp2,des2 = detector.detectAndCompute(gray2,None)\n# bf = cv2.BFMatcher()\n matches = knnmatch(des2,des1)\n# good = []\n# for m,n in matches:\n# if m.distance < 0.75*n.distance:\n# good.append(m)\n# \n src_pts = np.float32([ kp2[m.queryIdx].pt for m in matches ])\n dst_pts = np.float32([ kp1[m.trainIdx].pt for m in matches ])\n H = findhomography(src_pts, dst_pts, 3000)\n# H,mask = cv2.findHomography(src_pts,dst_pts,cv2.RANSAC)\n # warp = warpperspective(imgs[0],H)\n warp = cv2.warpPerspective(imgs[i+1], H, (imgs[i+1].shape[1]*2 , imgs[i+1].shape[0]*2))\n rows, cols = np.where(warp[:,:,0] !=0)\n min_row, max_row = min(rows), max(rows) +1\n min_col, max_col = min(cols), max(cols) +1\n result = warp[min_row:max_row,min_col:max_col,:]\n # imgs = warp\n # warp[0:imgs[0].shape[0], 0:imgs[0].shape[1]] = imgs[2]\n stitcher = cv2.createStitcher(False)\n result = stitcher.stitch((imgs[i],result))\n imgs[i+1] = result[1]\n imgs[0] = imgs[-2]\n return imgs[0]", "def demo(net, data_dir, imgfile, out_dir):\n\n # Load the demo image\n im_file = os.path.join(data_dir, imgfile)\n im = cv2.imread(im_file)\n\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(net, im)\n scores = np.squeeze(scores)\n timer.toc()\n print ('Detection took {:.3f}s for '\n '{:d} object proposals').format(timer.total_time, boxes.shape[0])\n\n # Visualize detections for each class\n CONF_THRESH = 0.12\n NMS_THRESH = 0.3\n color_white = (0, 0, 0)\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 \n cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256))\n inds = np.where(dets[:, -1] >= CONF_THRESH)[0]\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n bbox = map(int, bbox)\n cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=4)\n cv2.putText(im, '%s %.3f' % (cls, score), (bbox[0], bbox[1] + 15),\n color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)\n return im", "def demo(net, image_name,num_class,save_ff):\r\n\r\n # Load the demo image\r\n #im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)\r\n im_file=image_name\r\n im = cv2.imread(im_file)\r\n\r\n # Detect all object classes and regress object bounds\r\n timer = Timer()\r\n timer.tic()\r\n #for zzz in range(100):\r\n scores, boxes = im_detect(net, im)\r\n timer.toc()\r\n print ('Detection took {:.3f}s for '\r\n '{:d} object proposals').format(timer.total_time, boxes.shape[0])\r\n\r\n # Visualize detections for each class\r\n CONF_THRESH = 0.35\r\n NMS_THRESH = 0.3\r\n thresh=CONF_THRESH\r\n for cls_ind, cls in enumerate(range(num_class)):#CLASSES[1:]\r\n cls_ind += 1 # because we skipped background\r\n # cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\r\n # cls_scores = scores[:, cls_ind]\r\n # dets = np.hstack((cls_boxes,\r\n # cls_scores[:, np.newaxis])).astype(np.float32)\r\n inds = np.where(scores[:, cls_ind] > thresh)[0]\r\n cls_scores = scores[inds, cls_ind]\r\n if cfg.TEST.AGNOSTIC:\r\n cls_boxes = boxes[inds, 
4:8]\r\n else:\r\n cls_boxes = boxes[inds, cls_ind*4:(cls_ind+1)*4]\r\n dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \\\r\n .astype(np.float32, copy=False)\r\n keep = nms(dets, NMS_THRESH)\r\n dets = dets[keep, :]\r\n #vis_detections(im, cls, dets, thresh=CONF_THRESH)\r\n inds = np.where(dets[:, -1] >= thresh)[0]\r\n if len(inds) == 0:\r\n continue\r\n\r\n im_tmp = im#im[:, :, (2, 1, 0)]\r\n for i in inds:\r\n bbox = dets[i, :4]\r\n score = dets[i, -1]\r\n print bbox,score,cls\r\n cv2.rectangle(im_tmp, (bbox[0],bbox[1]), (bbox[2],bbox[3]), (0,0,255),2)\r\n #save_ff=\"/storage2/liushuai/faster_rcnn/FasterRCNN-Encapsulation-Cplusplus/faster_cxx_lib_ev2641/test_result.jpg\"\r\n im_tmp = im#im[:, :, (2, 1, 0)]\r\n cv2.imwrite(save_ff,im_tmp)\r\n #save_pic(im, cls, dets, thresh=CONF_THRESH,save_ff)\r", "def demo(sess, net, image_name):\n # Load the demo image\n global CLASS_NAME\n global CHECK\n CHECK = 0\n # 读取的截图所在的位置\n # im_file = Cnn_path + \"data/VOCdevkit2007/VOC2007/JPEGImages/\" + image_name\n curpath = os.path.dirname(os.path.realpath(__file__))\n im_file = curpath + \"\\\\data\\\\VOCdevkit2007\\\\VOC2007\\\\JPEGImages\\\\\" + image_name\n im = cv2.imread(im_file)\n\n # Detect all object classes and regress object bounds\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(sess, net, im)\n timer.toc()\n print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))\n\n # Visualize detections for each class\n # score 阈值,最后画出候选框时需要,>thresh才会被画出\n CONF_THRESH = 0.5\n # 非极大值抑制的阈值,剔除重复候选框\n NMS_THRESH = 0.3\n # 利用enumerate函数,获得CLASSES中 类别的下标cls_ind和类别名cls\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # because we skipped background\n # 取出bbox ,score\n cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n # 将bbox,score 一起存入dets\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n # 进行非极大值抑制,得到抑制后的 dets\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n # 画框\n vis_detections(im, cls, dets, thresh=CONF_THRESH)\n if CHECK == 0:\n CLASS_NAME = \"None\"\n # im = im[:, :, (2, 1, 0)]\n # fig, ax = plt.subplots()\n # ax.imshow(im, aspect='equal')\n # ax.set_title(\"None\",fontsize=10)\n # plt.axis('off')\n # plt.tight_layout()\n # plt.draw()\n # RES[INDS.__getitem__(image_name.split(\"_\")[0])][INDS.__getitem__(CLASS_NAME)]+=1\n # plt.savefig(\"./output/\"+CLASS_NAME+\"_\" + image_name)\n # plt.savefig(\"./output/\" + image_name)\n MAX_SCORE[0] = 0.0", "def feature_extraction(img, feature):\n\n if feature == 'HoG':\n # HoG parameters\n\n # In the case of the Hog Feature, we already given the base parameters for using hog feature function.\n # TA - You can just use that parameter with each subdivide image (which has image grid size * image grid size)\n # Thank you for the reply. Does it mean to divide the image into 20x20 size sub-images and perform the feature extraction on each image??\n # TA - Yes. In the SIFT, image grid size is different.\n\n win_size = (32, 32)\n block_size = (32, 32)\n block_stride = (16, 16)\n cell_size = (16, 16)\n\n nbins = 9\n deriv_aperture = 1\n win_sigma = 4\n histogram_norm_type = 0\n l2_hys_threshold = 2.0000000000000001e-01\n gamma_correction = 0\n nlevels = 64\n\n # Your code here. 
You should also change the return value.\n\n # sample visualizing\n # cv2.imshow('img', img)\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n\n hog = cv2.HOGDescriptor(win_size,\n block_size,\n block_stride,\n cell_size,\n nbins,\n deriv_aperture,\n win_sigma,\n histogram_norm_type,\n l2_hys_threshold,\n gamma_correction,\n nlevels)\n\n # additional parameters\n\n #hist = hog.compute(gray,winStride,padding,locations)\n\n #TODO: Check if this is valid???\n\n hist = hog.compute(gray)\n hist_resized = np.resize(hist, (int(len(hist)/36), 36))\n hist_resized\n return hist_resized\n\n elif feature == 'SIFT':\n\n # Your code here. You should also change the return value.\n\n #input image size 240 * 200 ==> divide H, W by 20 ==> 12 * 10 = 120\n #in case of this input image, the number of feature is 120.\n #So the number of feature is changed according to input image size.\n\n #IF PROBLEMS WITH DEPENDENCIES: pip3 install opencv-contrib-python==3.4.2.16\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n sift = cv2.xfeatures2d.SIFT_create()\n kp, des = sift.detectAndCompute(gray, None)\n\n return des", "def detect_video(yolo_v3_model, video_path, batch_frames, output_path, train_input_size, classes_file_path, \n score_threshold, iou_threshold, num_of_anchor_bbox, strides, anchors, show = False, \n rectangle_colors = ''):\n \n # obtain number of classes\n num_of_classes = len(read_class_names(classes_file_path))\n \n # obtain VideoCapture object \n vid = cv2.VideoCapture(video_path)\n \n # obtain width, height and fps of video\n # by default VideoCapture returns float instead of int\n width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = int(vid.get(cv2.CAP_PROP_FPS))\n\n # obtain video codec\n codec = cv2.VideoWriter_fourcc(*'XVID')\n \n # obtain output_path\n # output_path must be .mp4\n out = cv2.VideoWriter(output_path, codec, fps+1, (width, height)) \n\n # create list to store images\n images = []\n \n # variable to track frame\n frame = 0 \n \n while True:\n \n try:\n \n # grabs, decodes and returns the next video frame\n _, image = vid.read()\n \n # append original image to original_images list\n images.append(image[:])\n \n # increment frame\n frame += 1\n \n \n # if current frame is less than batch_frames\n if frame < batch_frames:\n \n # move to next frame \n continue\n \n # iterate over images in chronological order (last image is image of interest to put bbox)\n for x in range(batch_frames):\n \n # convert original image to grayscale \n image = cv2.cvtColor(images[-batch_frames + x + 1], cv2.COLOR_BGR2RGB)\n \n # preprocess image\n image = transform_images(image[:], train_input_size)\n \n # obtain concat frame if none exist\n if x == 0: \n \n concat_image = image[:]\n \n # concatenate subsequent frames to concat_image\n else:\n \n concat_image = np.concatenate((concat_image, image), axis = -1)\n \n except:\n \n break\n \n # add batch dimensions to concatenated image \n concat_image = concat_image[np.newaxis, ...].astype(np.float32)\n \n # create constant tensor from concatenated image and feed it to yolo_v3_model\n batched_input = tf.constant(concat_image)\n yolo_output = yolo_v3_model(batched_input)\n\n # list to store bboxes from respective scales\n pred_bbox = []\n\n # iterate over 3 scales\n for i in range(3):\n\n # decode resepctive yolo_output from each scale\n pred_result = decode(yolo_output = yolo_output[i], num_of_anchor_bbox = num_of_anchor_bbox, \n classes = num_of_classes, strides = strides, anchors = anchors, index = 
i)\n\n # append to pred_bbox\n pred_bbox.append(pred_result)\n \n # obtain results of shape (:, 5 + num_classes), i.e all bboxes\n pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]\n \n # concatenate all bboxes from all scales\n pred_bbox = tf.concat(pred_bbox, axis = 0)\n\n # post process all bboxes using latest image in orignal_images\n bboxes = postprocess_boxes(pred_bbox, images[-1], train_input_size, score_threshold)\n\n # non maximal supression for bboxes\n bboxes = nms(bboxes, iou_threshold, method = 'nms')\n\n # draw bbox on latest image in orignal_images\n image = draw_bbox(images[-1], bboxes, classes_file_path, rectangle_colors = rectangle_colors)\n \n # save image frame to video path if path to save is given\n if output_path != '': out.write(image)\n \n # display image frame (i.e play video) if show is true \n if show:\n \n # show the image\n cv2.imshow('output', image)\n \n # if q key is presssed\n if cv2.waitKey(25) & 0xFF == ord(\"q\"):\n \n # end session\n cv2.destroyAllWindows()\n \n # break out of while loop\n break\n \n # When everything done, release the capture\n vid.release()\n cv2.destroyAllWindows()", "def image_detect_and_compute_video(detector, img_name):\n img_building = cv2.cvtColor(img_name, cv2.COLOR_BGR2RGB)\n sift = cv2.xfeatures2d.SIFT_create()\n kp, des = sift.detectAndCompute(img_building, None)\n img_kp = cv2.drawKeypoints(img_building, kp, img_building)\n return img_building, kp, des", "def predict_from_cv2(yolo, inputfilepath):\n\n print(\"call func of predict_from_cv2\")\n img = cv2.imread(inputfilepath)\n yolo_results = yolo.predict(img)\n for yolo_result in yolo_results:\n print(yolo_result.get_detect_result())", "def __stage2(self, img, total_boxes, stage_status: StageStatus):\r\n\r\n num_boxes = total_boxes.shape[0]\r\n if num_boxes == 0:\r\n return total_boxes, stage_status\r\n\r\n # second stage\r\n tempimg = np.zeros(shape=(24, 24, 3, num_boxes))\r\n\r\n for k in range(0, num_boxes):\r\n tmp = np.zeros((int(stage_status.tmph[k]), int(stage_status.tmpw[k]), 3))\r\n\r\n tmp[stage_status.dy[k] - 1:stage_status.edy[k], stage_status.dx[k] - 1:stage_status.edx[k], :] = \\\r\n img[stage_status.y[k] - 1:stage_status.ey[k], stage_status.x[k] - 1:stage_status.ex[k], :]\r\n\r\n if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:\r\n tempimg[:, :, :, k] = cv2.resize(tmp, (24, 24), interpolation=cv2.INTER_AREA)\r\n\r\n else:\r\n return np.empty(shape=(0,)), stage_status\r\n\r\n tempimg = (tempimg - 127.5) * 0.0078125\r\n tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))\r\n\r\n out = self._rnet.run(tempimg1)\r\n\r\n out0 = np.transpose(out[0])\r\n out1 = np.transpose(out[1])\r\n\r\n score = out1[1, :]\r\n\r\n ipass = np.where(score > self._steps_threshold[1])\r\n\r\n total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])\r\n\r\n mv = out0[:, ipass[0]]\r\n\r\n if total_boxes.shape[0] > 0:\r\n pick = self.__nms(total_boxes, 0.7, 'Union')\r\n total_boxes = total_boxes[pick, :]\r\n total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv[:, pick]))\r\n total_boxes = self.__rerec(total_boxes.copy())\r\n\r\n return total_boxes, stage_status", "def vis_detections_video(im, class_name, dets, thresh=0.5):\n\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return im\n\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n cv2.rectangle(im,(bbox[0],bbox[1]),(bbox[2],bbox[3]),(0,0,255),2)\n 
cv2.rectangle(im,(int(bbox[0]),int(bbox[1])-10),(int(bbox[0]+200),int(bbox[1])+10),(10,10,10),-1)\n cv2.putText(im,'{:s} {:.3f}'.format(class_name, score),(int(bbox[0]),int(bbox[1]-2)),cv2.FONT_HERSHEY_SIMPLEX,.45,(255,255,255))#,cv2.CV_AA)\n return im", "def get_classification(self, image):\n if self.correct_gamma:\n if self.gamma == 1.0:\n self.gamma = 0.6\n elif self.gamma == 0.6:\n self.gamma = 1.0\n image = self.adjust_gamma(image, self.gamma)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image_np = np.asarray(image, dtype=\"uint8\")\n image_np_expanded = np.expand_dims(image_np, axis=0)\n\n detected = False\n\n with self.detection_graph.as_default():\n (boxes, scores, classes, num) = self.sess.run(\n [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],\n feed_dict={self.image_tensor: image_np_expanded})\n boxes = np.squeeze(boxes)\n classes = np.squeeze(classes).astype(np.int32)\n scores = np.squeeze(scores)\n best_scores = []\n\n for idx, classID in enumerate(classes):\n if self.MODEL_NAME == 'ssdlite_mobilenet_v2_coco_2018_05_09':\n if classID == 10: # 10 is traffic light\n if scores[idx] > 0.10: #confidence level\n best_scores.append([scores[idx], idx, classID])\n detected = True\n else: # we tuned the model to classify only traffic lights\n if scores[idx] > 0.10: # confidence level\n best_scores.append([scores[idx], idx, classID])\n detected = True\n\n tl_index = TrafficLight.UNKNOWN\n if detected:\n best_scores.sort(key=lambda tup: tup[0], reverse=True)\n\n best_score = best_scores[0]\n rospy.logdebug(\"number of TL found %d, best score: %f, color: %f\", len(best_scores), best_score[0], best_score[2])\n nbox = boxes[best_score[1]]\n\n height = image.shape[0]\n width = image.shape[1]\n\n box = np.array([nbox[0]*height, nbox[1]*width, nbox[2]*height, nbox[3]*width]).astype(int)\n box_height = box[2] - box[0]\n box_width = box[3] - box[1]\n ratio = float(box_height)/float(box_width)\n rospy.logdebug(\"ratio: %f\", ratio)\n if ratio >= 2.0 and ratio < 3.0: #started from 2.4\n tl_cropped = image[box[0]:box[2], box[1]:box[3]]\n tl_color, tl_index = self.get_color(tl_cropped)\n #color = ['RED', 'YELLOW', 'GREEN', 'UNKNOWN']\n #tl_index = best_score[2]\n #tl_color = color[tl_index]\n #augment image with detected TLs\n cv2.rectangle(image, (box[1], box[0]), (box[3], box[2]), (0, 255, 0), 2)\n font = cv2.FONT_HERSHEY_SIMPLEX\n font_color = (255, 255, 255)\n cv2.putText(image, tl_color, (box[1], box[0]), font, 2.0, font_color, lineType=cv2.LINE_AA)\n return image, tl_index", "def classifier():\n\tprint(\"Classifying\")\n\t#initialize important variables\n\tminConfidence = 0.5\n\tthresholdValue = 0.3\n\t\n\t\"\"\"\n\tfile = request.files#['image']\n\tfile.save(\"./classifier_image.jpg\")\n\tframe = cv2.imread(\"./classifier_image.jpg\")\n\t\"\"\"\n\tfile = request.json\n\tframe = np.array(file[\"Frame\"], dtype = \"uint8\") \n\n\t#file = request.files['image']\n\t#file.save(\"./classifier_image.jpg\")\n\t#frame = cv2.imread(\"./classifier_image.jpg\")\n\t#file = request.json\n\t#frame = np.array(file[\"contour\"], dtype=\"uint8\")\n\t\n\t#Get Image dimensions\n\timage = cv2.copyMakeBorder(frame, 30, 30, 30, 30, cv2.BORDER_CONSTANT, value=255)\n\t(H, W) = image.shape[:2]\n\t\n\t#Get the output layers parameters\n\tln = net.getLayerNames()\n\tln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\t\n\t#Create a blob to do a forward pass\n\tblob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, 
crop=False)\n\tnet.setInput(blob)\n\t#print(H, W)\n\tlayerOutputs = net.forward(ln)\n\tprint(type(net))\n\tboxes = []\n\tconfidences = []\n\tclassIDs = []\n\tfor output in layerOutputs:\n\t\tprint(\"detecting\")\n\t\t#loop over each detection\n\t\tfor detection in output:\n\t\t\t# extract the class ID and confidence (i.e., probability) of\n\t\t\t# the current object detection\n\t\t\tscores = detection[5:]\n\t\t\tclassID = np.argmax(scores)\n\t\t\tconfidence = scores[classID]\n\n\t\t\t# filter out weak predictions by ensuring the detected\n\t\t\t# probability is greater than the minimum probability\n\t\t\tif confidence > minConfidence:\n\t\t\t\t# scale the bounding box coordinates back relative to the\n\t\t\t\t# size of the image, keeping in mind that YOLO actually\n\t\t\t\t# returns the center (x, y)-coordinates of the bounding\n\t\t\t\t# box followed by the boxes' width and height\n\t\t\t\tbox = detection[0:4] * np.array([W, H, W, H])\n\t\t\t\t(centerX, centerY, width, height) = box.astype(\"int\")\n\n\t\t\t\t# use the center (x, y)-coordinates to derive the top and\n\t\t\t\t# and left corner of the bounding box\n\t\t\t\tx = int(centerX - (width / 2))\n\t\t\t\ty = int(centerY - (height / 2))\n\n\t\t\t\t# update our list of bounding box coordinates, confidences,\n\t\t\t\t# and class IDs\n\t\t\t\tboxes.append([x, y, int(width), int(height)])\n\t\t\t\tconfidences.append(float(confidence))\n\t\t\t\tclassIDs.append(classID)\n\n\t# apply non-maxima suppression to suppress weak, overlapping bounding\n\t# boxes\n\tidxs = cv2.dnn.NMSBoxes(boxes, confidences, minConfidence, thresholdValue)\n\n\t# ensure at least one detection exists\n\tif len(idxs) > 0:\n\t\toutput = json.load(open(outputFile))\n\t\t# loop over the indexes we are keeping\n\t\tfor i in idxs.flatten():\n\t\t\t# extract the bounding box coordinates\n\t\t\t(x, y) = (boxes[i][0], boxes[i][1])\n\t\t\t(w, h) = (boxes[i][2], boxes[i][3])\n\n\t\t\tprint(LABELS[classIDs[i]], output[LABELS[classIDs[i]]]+1, confidences[i])\n\t\t\toutput[LABELS[classIDs[i]]]+=1\n\t\t\n\t\tjson.dump(output, open(outputFile, \"w\"))\n\t\treturn LABELS[classIDs[i]]\n\telse:\n\t\treturn Response(status=200)", "def demo(sess, net, img_path):\n\n # Load the demo image\n once_time = 0\n\n im = cv2.imread(img_path)\n im = cv2.resize(im, (227, 227))\n # im = im[np.newaxis, :, :, :]\n t = time.time()\n im_orig = im.astype(np.float32, copy=True)\n im_orig -= cfg.PIXEL_MEANS\n print('subtract consume time {}s'.format(time.time() - t))\n im = im_orig[np.newaxis, :, :, :]\n # print('>>>>>>>', im.shape[0], im.shape[1])\n\n # Detect all object classes and regress object bounds\n timer = Timer()\n timer.tic()\n yaw, pitch, roll, yaw_raw, pitch_raw, roll_raw = net.test_image(sess, im)\n # yaw, pitch = net.test_image(sess, im)\n print(yaw, pitch, roll)\n # print(yaw_raw)\n # print(pitch_raw)\n # print(roll_raw)\n timer.toc()\n once_time = timer.total_time\n print('Detection took {:.3f}s'.format(timer.total_time))\n\n # cv2_vis(im, CLASSES[1], dets, result_file)\n return yaw, pitch, roll, once_time", "def detectUsingModel(model,img):\n\n\n rectangles = model.detectMultiScale(img)\n\n line_color = (0, 255, 0)\n line_type = cv.LINE_4\n\n for (x, y, w, h) in rectangles:\n top_left = (x, y)\n bottom_right = (x + w, y + h)\n cv.rectangle(img, top_left, bottom_right, line_color, lineType=line_type)\n \n return img", "def __init__(self):\r\n self.__hsl_threshold_hue = [58.0, 88.0]\r\n self.__hsl_threshold_saturation = [110.0, 255.0]\r\n self.__hsl_threshold_luminance = [30.0, 
140.0]\r\n\r\n self.hsl_threshold_output = None\r\n\r\n self.__rgb_threshold_red = [0.0, 255.0]\r\n self.__rgb_threshold_green = [62.0, 255.0]\r\n self.__rgb_threshold_blue = [0.0, 255.0]\r\n\r\n self.rgb_threshold_output = None\r\n\r\n self.__cv_erode_src = self.rgb_threshold_output\r\n self.__cv_erode_kernel = None\r\n self.__cv_erode_anchor = (-1, -1)\r\n self.__cv_erode_iterations = 0.0\r\n self.__cv_erode_bordertype = cv2.BORDER_CONSTANT\r\n self.__cv_erode_bordervalue = (-1)\r\n\r\n self.cv_erode_output = None\r\n self.__find_contours_input = self.cv_erode_output\r\n self.__find_contours_external_only = False\r\n\r\n self.find_contours_output = None", "def localizeLP(img):\n\t# preprocess\n\tgray_img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\t# using cascade classifier to detect license plate\n\t# the trained classifier is not good, retrain to get better results.\n\t# pre-trained classifier at: https://github.com/openalpr/openalpr/tree/master/runtime_data/region\n\tcascade = cv2.CascadeClassifier(\"localization/cascade_model.xml\")\n\trects = cascade.detectMultiScale(gray_img, scaleFactor=1.1, minNeighbors=3, minSize=(30, 30),flags=cv2.CASCADE_SCALE_IMAGE)\n\tif len(rects)==0:\n\t\treturn None,None\n\n\trect_list=[(rect[0],rect[1],rect[2],rect[3]) for rect in rects]\n\timg_height=gray_img.shape[0]\n\timg_width=gray_img.shape[1]\n\tmin_height, max_height, min_width, max_width = (0.1*img_height, 0.7*img_height, 0.1*img_width, 0.7*img_width)\n\thorizontal_min, horizontal_max = (0.1*img_width, 0.9*img_width)\n\tfor obj_rect in rect_list:\n\t\tmin_col, min_row, w, h=obj_rect\n\t\tmax_col = min_col+w\n\t\tmax_row=min_row+h\n\t\tif h >= min_height and h <= max_height and w >= min_width and w <= max_width and min_col>horizontal_min and max_col<horizontal_max:\n\t\t\tplate=gray_img[min_row:max_row,min_col:max_col]\n\t\t\treturn plate,(min_row,min_col,max_row,max_col)", "def main():\n # initialize the class labels and set the seed of the pseudorandom\n # number generator so we can reproduce our results\n labels = [\"dog\", \"cat\", \"panda\"]\n np.random.seed(1)\n\n # be * learned * by our model, but for the sake of this example, let's use random values\n W = np.random.randn(3, 3072)\n b = np.random.randn(3)\n\n # load our example image, resize it, and then flatten it into our\n # \"feature vector\" representation\n orig = cv2.imread(\"beagle.png\")\n image = cv2.resize(orig, (32, 32)).flatten()\n\n # compute the output scores by taking the dot product between the\n # weight matrix and image pixels, followed by adding in the b\n scores = W.dot(image) + b\n\n # loop over the scores + labels and display them\n for (label, score) in zip(labels, scores):\n print(\"[INFO] {}: {:.2f}\".format(label, score))\n\n # draw the label with the highest score on the image as our prediction\n cv2.putText(\n orig, \"Label: {}\".format(labels[np.argmax(scores)]), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2\n )\n\n # display our input image\n cv2.imshow(\"Image\", orig)\n cv2.waitKey(0)", "def run_visualization(image):\n # for image in images:\n try:\n with tf.gfile.FastGFile(image, 'rb') as f:\n jpeg_str = f.read()\n original_im = Image.open(BytesIO(jpeg_str))\n except IOError:\n print('Cannot retrieve image')\n return\n\n # print('running deeplab on image {0}'.format(image))\n resized_im, seg_map = MODEL.run(original_im)\n seg_map = seg_map.astype(np.uint8) * 255\n resized_im = np.array(resized_im, dtype=np.uint8)\n resized_im = cv2.cvtColor(resized_im, cv2.COLOR_BGR2RGB)\n # 
vis_segmentation(resized_im, seg_map,FULL_COLOR_MAP ,LABEL_NAMES)\n overlay_image = cv2.addWeighted(resized_im, 0.8, cv2.merge((seg_map * 0, seg_map, seg_map * 0)), 0.2, 0)\n # time.sleep(params.SEC_BETWEEN_PREDICTION)\n\n return resized_im, seg_map, overlay_image.astype(np.uint8)", "def image_detect_and_compute(detector, img_name):\n img_building = cv2.imread(img_name)\n img_building = cv2.cvtColor(img_building, cv2.COLOR_BGR2RGB)\n sift = cv2.xfeatures2d.SIFT_create()\n kp, des = sift.detectAndCompute(img_building, None)\n img_kp = cv2.drawKeypoints(img_building, kp, img_building)\n return img_building, kp, des", "def vis_detections(im, class_name, dets, thresh=0.8):\n global num\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return\n frame = im\n im = im[:, :, (2, 1, 0)]\n #fig, ax = plt.subplots(figsize=(12, 12))\n #ax.imshow(im, aspect='equal')\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 255), 2)\n print(num)\n cv2.imwrite('./'+str(num)+\".jpg\", frame)", "def vis_detections(im, class_name, dets, thresh=0.8, highest=False, use_colour=None, h=1.0, w=1.0, rc=1.0, alpha=1.0):\n overlay = im\n for i in range(dets.shape[0]):\n overlay = im.copy()\n output = im.copy()\n bbox = []\n for x, ind in zip(dets[i, :4], range(4)):\n if ind == 0 or ind == 2:\n x = int(np.round(x * w / rc))\n if ind == 1 or ind ==3:\n x = int(np.round(x * h / rc))\n bbox.append(x)\n bbox = tuple(bbox)\n thickness = 2\n if highest:\n colour = (0, 0, 200)\n else:\n colour = (0, 200, 0)\n if use_colour is not None:\n colour = use_colour\n\n if alpha == 1.0:\n cv2.rectangle(overlay, bbox[0:2], bbox[2:4], colour, thickness=thickness)\n cv2.putText(overlay, '%s' % (class_name), (bbox[0], bbox[1] + 15), cv2.FONT_HERSHEY_PLAIN,\n 1.0, (0, 0, 255), thickness=1)\n return overlay\n else:\n cv2.rectangle(overlay, bbox[0:2], bbox[2:4], colour, -1)\n cv2.addWeighted(overlay, alpha, output, 1 - alpha, 0, output)\n return output\n return overlay", "def get_frame(cap):\n\n #camera matrix for camera calibration\n mtx = np.array(np.mat(\"588.4525598886621, 0, 301.8008794717551; 0, 588.9763096391521, 242.617026416902; 0, 0, 1\"))\n\n #distrotion coefficients for camera calibration\n dist = np.array(np.mat(\"-0.4351555722591889, 0.2082765081608728, -0.006072767012672472, 0.008139871640987759, 0\"))\n\n #get image frame from the camera\n ret, frame = cap.read()\n\n return frame\n\n h, w = frame.shape[:2]\n\n #get the new optimal camera matrix and the roi which can be used to crop the result\n newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),0,(w,h))\n\n #get the undistroted image\n dst = cv2.undistort(frame, mtx, dist, None, newcameramtx)\n\n x,y,w,h = roi\n\n #get the cropped image\n dst = dst[y:y+h, x:x+w]\n h, w = dst.shape[:2]\n\n #furthur crop the image to reduce the size of arena\n dst = dst[int(h/7):int(h*6/7), int(w/7):int(w*6/7)]\n\n #resize the arena to ARENA_SIZE\n dst = cv2.resize(dst, ARENA_SIZE, interpolation= cv2.INTER_CUBIC)\n\n return dst", "def PostProcessing(image, resultList, threshold=0.6):\n\tnum_detections = resultList[0][0].astype(np.int)\n\tscores = resultList[2]\n\tboxes = resultList[3]\n\tbbox_num = 0\n\t\n\t# loop through all the detections and get the confidence and bbox coordinates\n\tfor i in range(num_detections):\n\t\tdet_conf = scores[0, i]\n\t\tdet_ymin = boxes[0, i, 0]\n\t\tdet_xmin = boxes[0, i, 1]\n\t\tdet_ymax = boxes[0, i, 2]\n\t\tdet_xmax = boxes[0, i, 3]\n\n\t\tbbox_width = 
det_xmax - det_xmin\n\t\tbbox_height = det_ymax - det_ymin\n\t\t# the detection confidence and bbox dimensions must be greater than a minimum value to be a valid detection\n\t\tif threshold <= det_conf and 1 >= det_conf and bbox_width > 0 and bbox_height > 0:\n\t\t\tbbox_num += 1\n\t\t\txmin = int(round(det_xmin * image.shape[1]))\n\t\t\tymin = int(round(det_ymin * image.shape[0]))\n\t\t\txmax = int(round(det_xmax * image.shape[1]))\n\t\t\tymax = int(round(det_ymax * image.shape[0]))\n\t\t\t\n\t\t\tcv2.rectangle(image, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)\n\t\telse:\n\t\t\tcontinue\n\n\tprint(\"detected bbox num:\", bbox_num)\n\tSRC_PATH = os.path.realpath(__file__).rsplit(\"/\", 1)[0]\n\tOutput_PATH = os.path.join(SRC_PATH, \"../output/output.jpg\")\n\ttry:\n\t\tos.mkdir(os.path.join(SRC_PATH, \"../output/\"))\n\texcept Exception as e:\n\t\tprint(\"Output Path already exists\")\n\tcv2.imwrite(Output_PATH, image)", "def detectCard(self):\n\n # The phash python bindings operate on files, so we have to write our\n # current frame to a file to continue\n cv2.imwrite('frame.jpg', self.frame)\n\n # Use phash on our frame\n ihash = phash.dct_imagehash('frame.jpg')\n idigest = phash.image_digest('frame.jpg')\n #print('1a')\n candidates = {}\n hashes = self.referencedb.get_hashes()\n #print('1a1')\n #print(hashes)\n for MultiverseID in hashes:\n #print('id %i' % MultiverseID)\n if (MultiverseID in self.blacklist):\n continue\n \n hamd = phash.hamming_distance(ihash, int(hashes[MultiverseID]))\n #print('1a11')\n #print('ham: %i tresh: %i id: %i' % (hamd, self.threshold, MultiverseID))\n #print(hamd <= self.threshold)\n if (hamd <= self.threshold):\n #print('X')\n candidates[MultiverseID] = hamd\n #print('1a2')\n if (not len(candidates)):\n print('No matches found')\n return None\n\n #print('1a3')\n finalists = []\n minV = min(candidates.values())\n #print('1a4')\n for MultiverseID in candidates:\n if (candidates[MultiverseID] == minV):\n finalists.append(MultiverseID)\n\n #print('1b') \n \n bestMatch = None\n correlations = {}\n for MultiverseID in finalists:\n\n \n hamd = candidates[MultiverseID]\n #print(self.ROOT_PATH % self.referencedb.IMAGE_FILE % MultiverseID)\n s = self.referencedb.IMAGE_FILE % MultiverseID\n s = '%s/%s' % (self.ROOT_PATH, s)\n #print(s)\n #digest = phash.image_digest(self.referencedb.IMAGE_FILE % MultiverseID)\n digest = phash.image_digest(s)\n \n corr = phash.cross_correlation(idigest, digest)\n \n if (bestMatch is None or corr > correlations[bestMatch]):\n bestMatch = MultiverseID\n correlations[MultiverseID] = corr\n \n name, code, rarity = self.referencedb.get_card_info(MultiverseID)\n print('Candidate: ' + name + ' [' + code + '] ' + str(corr))\n #print('1d')\n \n bestMatches = []\n ACCURACY = 1000\n print('Finalists:')\n print(finalists)\n for MultiverseID in finalists:\n if correlations[MultiverseID] + ACCURACY > correlations[bestMatch]:\n bestMatches.append(MultiverseID)\n \n #return bestMatches \n #return more finallist \n \n return bestMatch", "def detect(self):\n # process the input video and get the attributes:\n self.process_video()\n\n # build a rcnn/ yolov5 predictor:\n self.build_predictor()\n\n \n # assert not os.path.isfile(args.output_file), \"File with the name %s already exists\"%args.output_file\n # build the writer with same attributes:\n self.vid_writer = cv2.VideoWriter(self.output, self.fourcc, self.fps, (self.w, self.h))\n\n # inference time:\n start = time.time()\n print(\"Started inference\\n\")\n \n # progress bar using tqdm:\n pbar = 
tqdm(total=self.nframes)\n\n while(self.cap.isOpened()):\n ret, frame = self.cap.read()\n if ret == False:\n break # when the last frame is read \n\n # different formats of results:\n if self.library == \"yolov5\":\n # predict and bring the outputs to cpu:\n results = self.predictor(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) # convert to RGB\n predictions = results.xyxy[0].cpu()\n # find the instance indices with person:\n person_idx = predictions[:,5] == self.label_dict[\"person\"]\n # extract the corresponding boxes and scores:\n boxes = predictions[person_idx,:4].numpy()\n probs = predictions[person_idx,4].numpy()\n\n if self.library == \"detectron2\":\n # predict and bring the outputs to cpu:\n results = self.predictor(frame) # RGB conversion done automatically in detectron\n predictions = results[\"instances\"].to(\"cpu\")\n # find the instance indices with person:\n person_idx = [predictions.pred_classes == self.label_dict[\"person\"]]\n # extract the corresponding boxes and scores:\n boxes = predictions.pred_boxes[person_idx].tensor.numpy()\n probs = predictions.scores[person_idx].numpy()\n\n # draw boxes and write the frame to the video:\n if len(boxes): # check whether there are predictions\n box_frame = self.draw_person_boxes(frame, boxes, probs)\n else:\n box_frame = frame\n self.vid_writer.write(box_frame)\n\n pbar.update(1)\n pbar.close()\n\n # release the video capture object and write object:\n self.cap.release()\n self.vid_writer.release()\n\n print(\"Inferene on the video file took %0.3f seconds\"%(time.time()-start))", "def draw_boxes(image, gt_boxes_norm, pre_boxes_norm):\n # Load Image\n image = (image * 255.0).astype(np.uint8)\n image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n #image = cv2.add(image,image)\n #image = cv2.bitwise_not(image)\n # Draw prediction boxes\n for pre_box_points in pre_boxes_norm:\n image_shape = np.flip(image.shape[0:2], axis=0)\n\n for pre_box_point_idx in range(len(pre_box_points)):\n\n pre_start_point = pre_box_points[pre_box_point_idx] * image_shape\n pre_end_point = pre_box_points[(pre_box_point_idx + 1) % 4] * image_shape\n\n pre_start_point = pre_start_point.astype(np.int32)\n pre_end_point = pre_end_point.astype(np.int32)\n\n cv2.line(\n image, tuple(pre_start_point),\n tuple(pre_end_point),\n (107,222,35), thickness=1)\n\n # Draw boxes if they exist\n if gt_boxes_norm is not None:\n for gt_box_points in gt_boxes_norm:\n for gt_box_point_idx in range(len(gt_box_points)):\n\n gt_start_point = gt_box_points[gt_box_point_idx] * image_shape\n gt_end_point = gt_box_points[(gt_box_point_idx + 1) % 4] * image_shape\n\n gt_start_point = gt_start_point.astype(np.int32)\n gt_end_point = gt_end_point.astype(np.int32)\n\n cv2.line(\n image, tuple(gt_start_point),\n tuple(gt_end_point),\n (0,0,205), thickness=1)\n\n return image", "def detect_image(yolo_v3_model, image_paths, batch_frames, output_path, train_input_size, classes_file_path, \n score_threshold, iou_threshold, num_of_anchor_bbox, strides, anchors, show = False, \n rectangle_colors = ''):\n \n # obtain number of classes\n num_of_classes = len(read_class_names(classes_file_path))\n \n # create list to store images\n original_images = []\n \n # iterate over images in chronological order (last image is image of interest to put bbox)\n for x in range(batch_frames):\n \n # obtain original image\n original_image = cv2.imread(image_paths[x])\n \n # append original image to original_images list\n original_images.append(original_image[:])\n \n # convert original image to grayscale \n image = 
cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)\n \n # preprocess image\n image = transform_images(image[:], train_input_size)\n\n # obtain concat frame if none exist\n if x == 0: \n\n concat_image = image[:]\n\n # concatenate subsequent frames to concat_image\n else:\n\n concat_image = np.concatenate((concat_image, image), axis = -1)\n \n # add batch dimensions to concatenated image \n concat_image = concat_image[np.newaxis, ...].astype(np.float32)\n \n # create constant tensor from concatenated image and feed it to yolo_v3_model\n batched_input = tf.constant(concat_image)\n yolo_output = yolo_v3_model(batched_input)\n \n # list to store bboxes from respective scales\n pred_bbox = []\n \n # iterate over 3 scales\n for i in range(3):\n\n # decode resepctive yolo_output from each scale\n pred_result = decode(yolo_output = yolo_output[i], num_of_anchor_bbox = num_of_anchor_bbox, \n classes = num_of_classes, strides = strides, anchors = anchors, index = i)\n \n # obtain results of shape (:, 5 + num_classes), i.e all bboxes\n pred_result_reshaped = tf.reshape(pred_result, (-1, tf.shape(pred_result)[-1]))\n \n # append to pred_bbox\n pred_bbox.append(pred_result_reshaped)\n \n # concatenate all bboxes from all scales\n pred_bbox = tf.concat(pred_bbox, axis = 0)\n \n # post process all bboxes using latest image in orignal_images\n bboxes = postprocess_boxes(pred_bbox, original_images[-1], train_input_size, score_threshold)\n \n # non maximal supression for bboxes\n bboxes = nms(bboxes, iou_threshold, method = 'nms')\n \n # draw bbox on latest image in orignal_images\n image = draw_bbox(original_images[-1], bboxes, classes_file_path, rectangle_colors = rectangle_colors)\n \n # save image if path to save is given\n if output_path != '': cv2.imwrite(output_path, image)\n \n # display image if show is true \n if show:\n \n # show the image\n cv2.imshow(\"predicted image\", image)\n \n # load and hold the image\n cv2.waitKey(0)\n \n # to close the window after the required kill value was provided\n cv2.destroyAllWindows()\n \n return image", "def findMatchesBetweenImages(image_1, image_2):\n # matches - type: list of cv2.DMath\n matches = None\n # image_1_kp - type: list of cv2.KeyPoint items.\n image_1_kp = None\n # image_1_desc - type: numpy.ndarray of numpy.uint8 values.\n image_1_desc = None\n # image_2_kp - type: list of cv2.KeyPoint items.\n image_2_kp = None\n # image_2_desc - type: numpy.ndarray of numpy.uint8 values.\n image_2_desc = None\n # WRITE YOUR CODE HERE.\n\n sift = cv2.ORB_create()\n image_1_kp, image_1_desc = sift.detectAndCompute(image_1, None)\n image_2_kp, image_2_desc = sift.detectAndCompute(image_2, None)\n\n # bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n # matches = bf.match(image_1_desc, image_2_desc)\n # matches = sorted(matches, key = lambda x:x.distance)\n # matches = matches[:10]\n\n bf = cv2.BFMatcher()\n matches = bf.knnMatch(image_1_desc, image_2_desc, k=2)\n\n # Apply ratio test\n good = []\n for m, n in matches:\n print(m.distance, n.distance, m.distance < .75*n.distance)\n if m.distance < (0.75 * n.distance):\n good.append([m])\n\n # We coded the return statement for you. 
You are free to modify it -- just\n # make sure the tests pass.\n print(len(good), good)\n return image_1_kp, image_2_kp, matches\n # END OF FUNCTION.", "def detect(self, input_image):\n self.t.start()\n frame = self.convert_image(input_image)\n frame = cv2.pyrDown(frame)\n\n img, confidence, x, y = self.detector.detect(frame)\n print('Detection:', confidence, x, y)\n det = Target_coordinates()\n det.confidence = confidence\n det.x = x\n det.y = y\n self.pub_detection.publish(det)\n self.pub_fpv.publish(self.bridge.cv2_to_imgmsg(img))\n cv2.imwrite('frames/frame%d.jpg' % self.frame_num, img)\n self.frame_num += 1\n self.t.end()\n # Display\n cv2.imshow(self.iw, img)\n key = cv2.waitKey(30) & 0xFF\n if key == 27:\n cv2.destroyAllWindows()\n sys.exit(27)", "def video_handle_for_demo():\n frame = cv2.imread(\"vision.png\")\n\n return frame", "def setupuv(rc):\n if cv is not None:\n (r,c) = rc\n u = cv.CreateMat(r, c, cv.CV_32FC1)\n v = cv.CreateMat(r, c, cv.CV_32FC1)\n return (u, v)\n else:\n return [None]*2", "def placement_mat(image):\n\n kernel = np.ones((3, 3), np.uint8)\n\n crop_image, x_shift, y_shift = crop_table(image)\n # crop_image = image\n # cv2.imwrite(\"CroppedUndistorted_PlacementMat.jpg\", crop_image)\n gray = cv2.cvtColor(crop_image, cv2.COLOR_BGR2GRAY)\n gray = shadows(gray)\n\n # Erosion\n # new_image = cv2.dilate(new_image, kernel, iterations=1)\n\n # Opening - removes noise\n new_image = cv2.morphologyEx(gray, cv2.MORPH_CLOSE, kernel, iterations=1)\n new_image = cv2.morphologyEx(new_image, cv2.MORPH_OPEN, kernel, iterations=2)\n # new_image = gray\n\n # new_image = cv2.GaussianBlur(new_image, (5, 5), 0)\n\n # alpha * old_image + beta\n\n alpha = 1 # Simple contrast control\n beta = 0 # Simple brightness control\n new_image = cv2.convertScaleAbs(new_image, alpha=alpha, beta=beta)\n\n\n # Gamma correction\n\n gamma = 1\n table = np.array([((i / 255.0) ** gamma) * 255 for i in np.arange(0, 256)]).astype(\"uint8\")\n new_image = cv2.LUT(new_image, table)\n\n # new_image = cv2.medianBlur(new_image, 5)\n # new_image = cv2.medianBlur(new_image, 5)\n\n cv2.imshow(\"new image\", new_image)\n cv2.waitKey(1000)\n\n\n edged = cv2.Canny(new_image, 50, 100)\n edged = cv2.morphologyEx(edged, cv2.MORPH_CLOSE, kernel, iterations=1)\n # edged = cv2.morphologyEx(edged, cv2.MORPH_OPEN, kernel, iterations=1)\n\n cv2.imshow(\"edged\", edged)\n cv2.waitKey()\n cv2.destroyAllWindows()\n\n\n\n\n # # Object Detection\n #\n # # blur = cv2.medianBlur(gray, 3)\n # # cv2.imshow(\"Blurred\", blur)\n # # cv2.waitKey(0)\n #\n # Belt Detector\n x, y = circular_detector(gray, 110, 120)\n #\n # Housed Bearing\n x, y = circular_detector(gray, 50, 60) # Finds the inside circle of housed bearing\n #\n # M12 Nut\n # x, y = M12Nut(gray)\n x, y = Hexagon(edged)\n\n # # # Pulley\n # # x, y = pulley(gray)\n #\n # # 17mm Spacer\n # x, y = circular_detector(edged, 13, 15)\n #\n # # 9mm Spacer\n # x, y = circular_detector(edged, 6, 10)\n\n _, cnts, hierarchy = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n\n # for c in cnts:\n # cv2.drawContours(edged, [c], -1, (0, 0, 0), 2)\n # cv2.imshow(\"removing contours\", edged)\n # cv2.waitKey(100)\n # cv2.destroyAllWindows()\n #\n # cv2.destroyAllWindows()\n # cv2.drawContours(edged, cnts[0], -1, (255, 255, 255), 2)\n # cv2.imshow(\"contour0\", edged)\n # cv2.waitKey()\n\n\n try:\n # correct image point for crop\n x = x + x_shift\n y = y + y_shift\n except:\n # No crop occured\n pass\n\n return x, y", "def detect(self, img):\n # 1. 
color filter\n lane_img = self.color_filter(img.copy())\n # 2. gaussian blur\n lane_img = self.gaussian_blur(lane_img)\n # 3.canny edge detection\n lane_img = self.canny(lane_img)\n # 4. region of interest crop\n lane_img = self.region_of_interest(lane_img)\n # 5. hough lines\n lane_img = self.hough_lines(lane_img)\n # 6. overlay lane over original image\n result_img = weighted_img(lane_img, img)\n\n return result_img", "def parameterizeLine(self, img):\n # TODO-START: run linear regression to parameterize the line\n\n\n\n #### direct conversion to CV2 ####\n np_arr = np.fromstring(img.data, np.uint8)\n d = cv2.imdecode(np_arr, cv2.IMREAD_GRAYSCALE)\n # d = cv2.cvtColor(image_np, cv2.COLOR_BGR2GRAY)\n\n\n upper = 255\n lower = 255\n\n k_dim = 5\n kernel = np.ones((k_dim,k_dim),np.uint8)\n\n copy = deepcopy(d) #preserves raw images, but requires more time and processing power \n copy2 = cv2.inRange(copy,lower,upper)\n copy3 = cv2.morphologyEx(copy2,cv2.MORPH_CLOSE,kernel)\n # d = cv2.resize(d, (0,0), fx=10, fy=10)\n# \n ret,thresh = cv2.threshold(copy3,0,255,0)\n im2,contours,hierarchy = cv2.findContours(thresh, 1, 2)#adds the pixels in the threshold to the list of possible contours\n \n if len(contours) > 0:\n\n max_contours = max(contours, key = lambda x: cv2.contourArea(x))\n\n [vx,vy,x,y] = cv2.fitLine(max_contours, cv2.DIST_L2,0,0.01,0.01)\n\n lefty = int((-x*vy/vx) + y)\n righty = int(((128-x)*vy/vx)+y)\n\n pt1 = (127,righty)\n pt2 = (0,lefty)\n\n regression = cv2.line(copy3,pt1,pt2,(0,255,0),2)\n regression = cv2.line(d,pt1,pt2,(0,255,0),2)\n\n # TODO-START: return x, y, vx, and vy in that order\n self.image_pub.publish(self.bridge.cv2_to_imgmsg(copy3, \"8UC1\"))\n self.image_pub.publish(self.bridge.cv2_to_imgmsg(d, \"8UC1\"))\n print(x,y,vx,vy)\n return x,y,vx,vy\n\n else:\n pass\n\n # TODO-END", "def compute_TVL1(image_dir):\n image_list = sorted(os.listdir(image_dir))\n pre_image = cv2.imread()\n TVL1 = DualTVL1()\n flow = []\n for i in range(1, len(image_list)):\n cur_image = cv2.imread()\n cur_flow = TVL1.calc(pre_image, cur_image, None)\n pre_image = cur_image\n\n max_val = lambda x: max(max(x.flatten()), abs(min(x.flatten())))\n cur_flow = cur_flow / max_val(cur_flow)\n flow.append(cur_flow)\n flow = np.array(flow)\n return flow", "def video_process(threshold=THRESHOLD, inputpath=INPUTPATH, file=FILE):\n #create video capture object\n cap = cv2.VideoCapture(f'{inputpath}{file}')\n name = file.split('/')[-1].split('.')[0]\n frame_sqrs_list = []\n if (cap.isOpened()==False):\n logging.error('Error opening video stream or file')\n model = load_model()\n frame_n = 1\n print('model loaded')\n while(cap.isOpened()):\n #capture frame-by-frame\n ret, frame = cap.read()\n if ret == True:\n squares_list = img_preprocess(frame)\n frame_n = frame_n+1\n print(f'enter video file, frame{frame_n}')\n x_list = []\n y_list = []\n for sq in squares_list:\n predict = predict_hot_pxl(sq.sq, model)\n if predict > threshold:\n pred = 1\n print('ERROR')\n x_list.append(sq.y)\n y_list.append(sq.x)\n # draw square around error in frame:\n # FIXME: save a square to a list of squares\n continue\n else:\n pred = 0\n print('no error')\n # FIXME: draw_sqr(name, frame, frame_n, !!! PASS LIST INSTEAD !!! 
and rewrite the draw func to draw several squares sq.y, sq.x) \n sq = sq._replace(pred_float = predict)\n sq = sq._replace(pred_int = pred)\n # dict element sq is now obsolete, remove it\n sq = sq._replace(sq = None)\n # save single frame with squares marking errors as png to disc:\n draw_sqr(name, frame, frame_n, x_list, y_list)\n frame_sqrs_list.append(sq)\n # Break the loop\n else:\n break\n return name, frame_sqrs_list", "def __stage3(self, img, total_boxes, stage_status: StageStatus):\r\n num_boxes = total_boxes.shape[0]\r\n if num_boxes == 0:\r\n return total_boxes, np.empty(shape=(0,))\r\n\r\n total_boxes = np.fix(total_boxes).astype(np.int32)\r\n\r\n status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height),\r\n width=stage_status.width, height=stage_status.height)\r\n\r\n tempimg = np.zeros((48, 48, 3, num_boxes))\r\n\r\n for k in range(0, num_boxes):\r\n\r\n tmp = np.zeros((int(status.tmph[k]), int(status.tmpw[k]), 3))\r\n\r\n tmp[status.dy[k] - 1:status.edy[k], status.dx[k] - 1:status.edx[k], :] = \\\r\n img[status.y[k] - 1:status.ey[k], status.x[k] - 1:status.ex[k], :]\r\n\r\n if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:\r\n tempimg[:, :, :, k] = cv2.resize(tmp, (48, 48), interpolation=cv2.INTER_AREA)\r\n else:\r\n return np.empty(shape=(0,)), np.empty(shape=(0,))\r\n\r\n tempimg = (tempimg - 127.5) * 0.0078125\r\n tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))\r\n\r\n out = self._onet.run(tempimg1)\r\n out0 = np.transpose(out[0])\r\n out1 = np.transpose(out[1])\r\n out2 = np.transpose(out[2])\r\n\r\n score = out2[1, :]\r\n\r\n points = out1\r\n\r\n ipass = np.where(score > self._steps_threshold[2])\r\n\r\n points = points[:, ipass[0]]\r\n\r\n total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])\r\n\r\n mv = out0[:, ipass[0]]\r\n\r\n w = total_boxes[:, 2] - total_boxes[:, 0] + 1\r\n h = total_boxes[:, 3] - total_boxes[:, 1] + 1\r\n\r\n points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1\r\n points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile(total_boxes[:, 1], (5, 1)) - 1\r\n\r\n if total_boxes.shape[0] > 0:\r\n total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv))\r\n pick = self.__nms(total_boxes.copy(), 0.7, 'Min')\r\n total_boxes = total_boxes[pick, :]\r\n points = points[:, pick]\r\n\r\n return total_boxes, points", "def limb_tracker(frame=None, path=None, memory_efficient=True):\n start = time.time()\n # initialize matrix\n if path is not None:\n frame = cv2.imread(filename=path)\n # operations on frame\n gray = cv2.cvtColor(src=frame, code=cv2.COLOR_RGBA2GRAY)\n # remove noise from frame\n frame = cv2.fastNlMeansDenoisingColored(src=frame)\n # create hsv and split channels\n hsv = cv2.cvtColor(src=frame, code=cv2.COLOR_RGB2HSV)\n hue, sat, val = cv2.split(hsv)\n # create 'thresholded'\n _, thresholded = cv2.threshold(src=hue, thresh=0, maxval=255,\n type=cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n # detect face, hands, upper body and full body\n face, _, _ = cc_front.detectMultiScale3(image=gray, scaleFactor=1.3, minNeighbors=5,\n outputRejectLevels=True)\n\n face_roi = None; face_roi_in_frame = None; shoulders = None\n if not memory_efficient:\n upper_body, _, _ = cc_upper.detectMultiScale3(image=gray, scaleFactor=1.1, minNeighbors=7,\n outputRejectLevels=True)\n upper_body = []\n upper_roi = None\n\n ##################\n # FACE DETECTION #\n ##################\n for (x, y, w, h) in 
face:\n cv2.rectangle(img=frame, pt1=(x, y), pt2=(x + w, y + h), color=(0, 255, 0), thickness=2)\n face_roi = frame[y: y + h, x: x + w]\n try:\n # first match template in original frame\n ''' The matchTemplate() method requires three parameters. The first is the full \n frame image (i.e. the image that contains what we are searching for), the second\n is our query image (i.e. the image for which we're trying to pinpoint the location)\n and the third is the matching method we wish to use (there are a number of methods\n to perform template matching, but in this case I am using the 'correlation \n coefficient' which is specified by the flag cv2.TM_CCOEFF). '''\n result = cv2.matchTemplate(image=frame, templ=face_roi, method=cv2.TM_CCOEFF)\n # here I get min/max location values (only max will be used, that's the top left corner)\n _, _, minloc, maxloc = cv2.minMaxLoc(src=result)\n topleft = maxloc\n # grab the bounding box of roi and extract it from the frame image\n botright = (topleft[0] + face_roi.shape[0], topleft[1] + face_roi.shape[1])\n face_roi_in_frame = frame[topleft[1]:botright[1], topleft[0]:botright[0]]\n except (AttributeError, cv2.error):\n print('Error 001 occurred')\n pass\n ########################\n # UPPER BODY DETECTION #\n ########################\n for (x, y, w, h) in upper_body:\n # cv2.rectangle(img=frame, pt1=(x, y), pt2=(x + w, y + h), color=(0, 0, 255),\n # thickness=2)\n upper_roi = frame[y: y + h, x: x + w]\n upper_body_surface = (y + h) * (x + w)\n print('Centroid upper body pixel: ' + str(centroid_of_rect(roi=upper_roi)))\n shoulders = approx_shoulders(upper_body_roi=upper_roi)\n try:\n cv2.circle(img=upper_roi, center=shoulders[0], color=(0, 0, 255),\n radius=3, thickness=8)\n cv2.circle(img=upper_roi, center=shoulders[1], color=(0, 0, 255),\n radius=3, thickness=8)\n except (AttributeError, cv2.error):\n pass\n try:\n result = cv2.matchTemplate(image=frame, templ=upper_roi, method=cv2.TM_CCOEFF)\n _, _, minloc, maxloc = cv2.minMaxLoc(src=result)\n topleft = maxloc\n botright = (topleft[0] + upper_roi.shape[0], topleft[1] + upper_roi.shape[1])\n upper_roi_in_frame = frame[topleft[1]:botright[1], topleft[0]:botright[0]]\n cv2.circle(img=upper_roi_in_frame, center=centroid_of_rect(roi=upper_roi_in_frame),\n color=(0, 0, 255), radius=3, thickness=8)\n shoulders_in_frame = ((maxloc[0] + shoulders[0][0], maxloc[1] + shoulders[0][1]),\n (maxloc[0] + shoulders[1][0], maxloc[1] + shoulders[1][1]))\n b_arms = approx_biceps(thresh=thresholded, shoulder_pts=shoulders_in_frame, dst=frame)\n print('Left biceps coordinates: ' + str(b_arms[1]))\n print('Right biceps coordinates: ' + str(b_arms[0]))\n # point between shoulders\n mid_shoulders_pt = centroid_of_rect(roi=upper_roi_in_frame)[0], shoulders[0][1]\n cv2.circle(img=upper_roi, center=mid_shoulders_pt, radius=3, color=(0, 0, 255),\n thickness=8)\n f_arms = approx_forearms(shoulder_pts=shoulders_in_frame, biceps_pts=b_arms,\n thresh=thresholded, dst=frame)\n print('Left forearm coordinates: ' + str(f_arms[1]))\n print('Right forearm coordinates: ' + str(f_arms[0]))\n\n ''' Draw lines on frame for points we have so far '''\n\n if shoulders_in_frame[0] is not None and b_arms[0] is not None:\n # left shoulder joint to biceps\n cv2.line(img=frame, pt1=shoulders_in_frame[0], pt2=b_arms[0],\n color=(255, 0, 128), thickness=3)\n if shoulders_in_frame[1] is not None and b_arms[1] is not None:\n # right shoulder joint to biceps\n cv2.line(img=frame, pt1=shoulders_in_frame[1], pt2=b_arms[1],\n color=(255, 0, 128), thickness=3)\n 
if b_arms[0] is not None and f_arms[0] is not None:\n # right biceps joint to forearm\n cv2.line(img=frame, pt1=b_arms[0], pt2=f_arms[0], color=(255, 0, 128), thickness=3)\n if b_arms[1] is not None and f_arms[1] is not None:\n # left biceps joint to forearm\n cv2.line(img=frame, pt1=b_arms[1], pt2=f_arms[1], color=(255, 0, 128), thickness=3)\n # join shoulders and mid-shoulders point\n cv2.line(img=upper_roi_in_frame, pt1=shoulders[0], pt2=mid_shoulders_pt,\n color=(255, 0, 128), thickness=3)\n cv2.line(img=upper_roi_in_frame, pt1=shoulders[1], pt2=mid_shoulders_pt,\n color=(255, 0, 128), thickness=3)\n # mid-shoulders point to neck joint\n cv2.line(img=upper_roi_in_frame, pt1=mid_shoulders_pt,\n pt2=centroid_of_rect(roi=upper_roi_in_frame), color=(255, 0, 128), thickness=3)\n except (AttributeError, cv2.error):\n print('Error 002 occurred')\n pass\n\n # track time\n end = time.time(); exectime = end - start\n print('Execution time for initial detect: ' + str(exectime) + ' secs')\n\n return frame\n\n else:\n ##################\n # FACE DETECTION #\n ##################\n for (x, y, w, h) in face:\n cv2.rectangle(img=frame, pt1=(x, y), pt2=(x + w, y + h), color=(0, 255, 0), thickness=2)\n face_roi = frame[y: y + h, x: x + w]\n try:\n result = cv2.matchTemplate(image=frame, templ=face_roi, method=cv2.TM_CCOEFF)\n _, _, minloc, maxloc = cv2.minMaxLoc(src=result)\n topleft = maxloc\n botright = (topleft[0] + face_roi.shape[0], topleft[1] + face_roi.shape[1])\n face_roi_in_frame = frame[topleft[1]:botright[1], topleft[0]:botright[0]]\n face_bottom = (maxloc[0] + int(face_roi_in_frame.shape[0]/2),\n maxloc[1] + face_roi_in_frame.shape[1])\n cv2.circle(img=frame, center=face_bottom, radius=3, color=(0, 0, 255), thickness=8)\n ''' Approximate mid-shoulders point to the same value on X-Axis as face bottom, plus \n height of face rectangle, minus 1/5th of that same value. 
'''\n mid_shoulders_pt = (face_bottom[0],\n face_bottom[1] +\n face_roi_in_frame.shape[1] -\n int(face_roi_in_frame.shape[1]/5))\n cv2.circle(img=frame, center=mid_shoulders_pt, radius=3, color=(0, 0, 255), thickness=8)\n ''' Approximate right shoulder point to the same value on X-Axis as mid-shoulders, minus\n width of face rectangle, minus 1/15th of that same value.'''\n r_shoulder = (mid_shoulders_pt[0] -\n face_roi_in_frame.shape[0] -\n int(face_roi_in_frame.shape[0]/15),\n mid_shoulders_pt[1])\n cv2.circle(img=frame, center=r_shoulder, radius=3, color=(0, 0, 255), thickness=8)\n ''' Approximate left shoulder point to the same value on X-Axis as mid-shoulders, plus\n width of face rectangle, plus 1/15th of that same value.'''\n l_shoulder = (mid_shoulders_pt[0] +\n face_roi_in_frame.shape[0] +\n int(face_roi_in_frame.shape[0] / 4),\n mid_shoulders_pt[1])\n cv2.circle(img=frame, center=l_shoulder, radius=3, color=(0, 0, 255), thickness=8)\n shoulders = (r_shoulder, l_shoulder)\n biceps = approx_biceps(thresh=thresholded, shoulder_pts=shoulders, dst=frame)\n forearms = approx_forearms(shoulder_pts=shoulders, biceps_pts=biceps,\n thresh=thresholded, dst=frame)\n\n ''' Draw lines for points obtained '''\n # mid-shoulders to face bottom\n cv2.line(img=frame, pt1=mid_shoulders_pt, pt2=face_bottom,\n color=(255, 0, 128), thickness=3)\n if shoulders[0] is not None and biceps[0] is not None:\n # left shoulder joint to biceps\n cv2.line(img=frame, pt1=shoulders[0], pt2=biceps[0],\n color=(255, 0, 128), thickness=3)\n # left shoulder to mid-shoulders\n cv2.line(img=frame, pt1=shoulders[0], pt2=mid_shoulders_pt,\n color=(255, 0, 128), thickness=3)\n if shoulders[1] is not None and biceps[1] is not None:\n # right shoulder joint to biceps\n cv2.line(img=frame, pt1=shoulders[1], pt2=biceps[1],\n color=(255, 0, 128), thickness=3)\n # right shoulder to mid-shoulders\n cv2.line(img=frame, pt1=shoulders[1], pt2=mid_shoulders_pt,\n color=(255, 0, 128), thickness=3)\n if biceps[0] is not None and forearms[0] is not None:\n # right biceps joint to forearm\n cv2.line(img=frame, pt1=biceps[0], pt2=forearms[0], color=(255, 0, 128), thickness=3)\n if biceps[1] is not None and forearms[1] is not None:\n # left biceps joint to forearm\n cv2.line(img=frame, pt1=biceps[1], pt2=forearms[1], color=(255, 0, 128), thickness=3)\n except (AttributeError, cv2.error):\n print('Error 001 occurred')\n pass\n # track time\n end = time.time(); exectime = end - start\n print('Execution time for initial detect: ' + str(exectime) + ' secs')\n return frame", "def __init__(self, before, after):\r\n self.M = cv2.getPerspectiveTransform(before, after)\r\n self.inverse_M = cv2.getPerspectiveTransform(after, before)", "def demo_video(net, im,frame_number,args, output_folder, conf_thresh):\n \n #data_dir = '/mnt/nfs/scratch1/souyoungjin/RESULTS_FACE_DETECTIONS/ARUNI/video'+str(args.video_id)\n #out_dir = '/mnt/nfs/scratch1/ashishsingh/RESULTS_DOG_DETECTIONS/ARUNI/OHEM/'+args.video_folder_name\n \n out_dir = join(output_folder, args.video_id)\n\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(net, im)\n timer.toc()\n print ('Detection took {:.3f}s for '\n '{:d} object proposals').format(timer.total_time, boxes.shape[0])\n\n # Visualize detections for each class\n #CONF_THRESH = 0.80 #check threshold values\n CONF_THRESH = conf_thresh\n NMS_THRESH = 0.15 #check threshold values\n \n # detection file\n dets_file_name = os.path.join(out_dir, 
'video'+str(args.video_id)+'.txt') \n fid = open(dets_file_name, 'a+')\n sys.stdout.write('%s ' % (frame_number))\n \n cls_ind = 1\n cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n\n keep = np.where(dets[:, 4] > CONF_THRESH)\n dets = dets[keep]\n \n dets[:, 2] = dets[:, 2] - dets[:, 0] + 1\n dets[:, 3] = dets[:, 3] - dets[:, 1] + 1\n \n fid.write('FRAME NUMBER: '+ str(frame_number) + '\\n')\n fid.write(str(dets.shape[0]) + '\\n')\n for j in xrange(dets.shape[0]):\n fid.write('%f %f %f %f %f\\n' % (dets[j, 0], dets[j, 1], dets[j, 2], dets[j, 3], dets[j, 4]))\n\n print ''\n fid.close()\n \n #for cls_ind, cls in enumerate(CLASSES[1:]):\n #cls_ind += 1 # because we skipped background\n #cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n\n #cls_scores = scores[:, cls_ind]\n #dets = np.hstack((cls_boxes,cls_scores[:, np.newaxis])).astype(np.float32)\n #keep = nms(dets, NMS_THRESH)\n #dets = dets[keep, :]\n #im=vis_detections_video(im, cls, dets, thresh=CONF_THRESH)\n #cv2.imwrite(os.path.join('output',str(time.time())+'.jpg'),im)\n \n \n #cv2.imshow('ret',im)\n #cv2.waitKey(20)", "def MLP_Detection_MP(image, init_detection=False):\n\n h, w = image.shape[:2]\n\n def sliding_window_mp(thread_id):\n blocks = []\n\n num_block_x = (w - BBOX_SIZE[0]) / STEP_SIZE[0] + 1\n num_block_y = (h / 2 - BBOX_SIZE[1]) / STEP_SIZE[1] + 1\n num_blocks = num_block_x * num_block_y\n\n for i in xrange(thread_id, num_blocks, NUM_THREADS):\n x = i % num_block_x * STEP_SIZE[0]\n y = i / num_block_x * STEP_SIZE[1]\n blocks.append((x, y, image[y:y + BBOX_SIZE[1], x:x + BBOX_SIZE[0]]))\n return blocks\n\n def work(thread_id, image, result):\n blocks = sliding_window_mp(thread_id)\n for idx, (x, y, im_window) in enumerate(blocks):\n if im_window.shape[0] != BBOX_SIZE[1] or im_window.shape[1] != BBOX_SIZE[0]:\n continue\n \n # Calculate the HOG features\n fd = [hog(im_window[..., i], orientations=9, \n pixels_per_cell=(8, 8), \n cells_per_block=(3, 3), \n block_norm=\"L2\", \n visualise=False) for i in range(3)]\n fd = np.array(fd)\n fd = [fd.reshape(fd.size)]\n pred = clf.predict(fd)\n if pred == 1:\n currScore = float(clf.predict_proba(fd)[0][pred])\n tmp = (x, y, int(BBOX_SIZE[0]), int(BBOX_SIZE[1]), currScore)\n result.append(tmp)\n \n def work_bg(image, centroid, result):\n x, y = centroid[0] - BBOX_SIZE[0] / 2, centroid[1] - BBOX_SIZE[1] / 2\n im_window = image[y: y + BBOX_SIZE[1], \n x: x + BBOX_SIZE[0]]\n \n if im_window.shape[0] != BBOX_SIZE[1] or im_window.shape[1] != BBOX_SIZE[0]:\n return \n\n fd = hog(im_window, orientations=9, \n pixels_per_cell=(8, 8), \n cells_per_block=(3, 3), \n block_norm=\"L2\", \n visualise=False)\n fd = np.array(fd)\n fd = [fd.reshape(fd.size)]\n pred = bg_clf.predict(fd)\n if pred == 1:\n currScore = float(bg_clf.predict_proba(fd)[0][pred])\n tmp = (x, y, int(BBOX_SIZE[0]), int(BBOX_SIZE[1]), currScore)\n result.append(tmp)\n\n bs_image, centroids = process_bs(image, return_centroids=True)\n if not init_detection:\n # Assign jobs\n tic = time.time()\n threads = []\n results = [[] for _ in range(NUM_THREADS)]\n for centroid, result in zip(centroids, results):\n t = threading.Thread(target=work_bg, args=(bs_image, centroid, result))\n t.start()\n threads.append(t)\n\n else: \n # Swap image channel from BGR to RGB\n image = swapChannels(image)\n h, w = image.shape[:2]\n\n # Assign jobs\n tic = time.time()\n threads = []\n 
results = [[] for _ in range(NUM_THREADS)]\n for thread_id, result in enumerate(results):\n t = threading.Thread(target=work, args=(thread_id, image, result))\n t.start()\n threads.append(t)\n\n # Wait for computing\n still_alive = True\n while still_alive:\n still_alive = False\n for t in threads:\n if t.isAlive():\n still_alive = True\n if DEBUG_MODE:\n print \"Total time: %.5fs\" % (time.time() - tic)\n\n # Get final result\n detections = []\n final_select = None\n score = 0\n for result in results:\n for detection in result:\n detection = np.array(detection)\n detections.append(detection[:4])\n if score < detection[4]:\n score = detection[4]\n final_select = detection[:4]\n if DEBUG_MODE:\n print \"Final score: %f, total number of detections: %d\" % (score, len(detections))\n # If visualize is set to true, display the working\n # of the sliding window \n if DEBUG_MODE: \n clone = image.copy()\n for x1, y1, _, _ in detections:\n # Draw the detections at this scale\n x1, y1 = int(x1), int(y1)\n cv2.rectangle(clone, (x1, y1), (x1 + BBOX_SIZE[1], y1 +\n BBOX_SIZE[0]), (0, 0, 0), thickness=2)\n\n # Draw current best\n if final_select is not None:\n x1, y1, _, _ = final_select\n x1, y1 = int(x1), int(y1)\n cv2.rectangle(clone, (x1, y1), (x1 + BBOX_SIZE[1], y1 +\n BBOX_SIZE[0]), (0, 255, 0), thickness=2)\n clone_resize = cv2.resize(clone, VIZ_SIZE)\n if init_detection:\n clone_resize = swapChannels(clone_resize)\n if WRITE_TMP_RESULT:\n cv2.imwrite(\"Localization.png\", clone_resize)\n else:\n cv2.imshow(\"Localization\", clone_resize)\n\n if score >= PROB_CRITERIA:\n return tuple(final_select)\n else:\n return None", "def video_stabilizer(self, video=None):\r\n def in_roi(roi, p):\r\n x, y = p\r\n return roi['x1'] < x < roi['x2'] and roi['y1'] < y < roi['y2']\r\n\r\n \r\n if video is None:\r\n video = self.video_buffer\r\n stab_video = np.zeros_like(video)\r\n roi = self.get_roi(video=video, window_name='Draw ROI to stabilize the video around it')\r\n\r\n # params for ShiTomasi corner detection\r\n feature_params = dict(maxCorners=800,\r\n qualityLevel=0.01,\r\n minDistance=3,\r\n blockSize=3)\r\n \r\n # Parameters for lucas kanade optical flow\r\n lk_params = dict(winSize=(15, 15),\r\n maxLevel=8,\r\n criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\r\n \r\n m_dx, m_dy = 0, 0\r\n \r\n # Take first frame and find corners in it\r\n old_frame = video[0]\r\n \r\n rows, cols, depth = old_frame.shape\r\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\r\n \r\n p0 = cv2.goodFeaturesToTrack(old_gray, \r\n mask=None, \r\n **feature_params)\r\n p0 = np.expand_dims([p for p in p0.squeeze() if in_roi(roi, p)], 1)# p0.copy()\r\n \r\n for idx in tqdm(range(len(video))):\r\n \r\n # Get next frame\r\n frame = video[idx]\r\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n \r\n # calculate optical flow\r\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)\r\n \r\n # Select good points\r\n try:\r\n good_cur = p1[np.where(st == 1)]\r\n good_old = p0[np.where(st == 1)]\r\n except TypeError as e:\r\n print('TypeError, no good points are avaliabole, error: {0}'.format(e))\r\n print('Exit video stabilizer at frame {0} out of {1}'.format(idx, self.length))\r\n break\r\n \r\n dx = []\r\n dy = [] \r\n \r\n # Draw points and calculate\r\n for i, (cur, old) in enumerate(zip(good_cur, good_old)):\r\n a, b = cur.ravel()\r\n c, d = old.ravel()\r\n dx.append(c - a)\r\n dy.append(d - b)\r\n \r\n m_dx += np.mean(dx)\r\n m_dy += np.mean(dy)\r\n 
print(m_dx,m_dy)\r\n \r\n M = np.float32([[1, 0, m_dx], [0, 1, m_dy]])\r\n \r\n stab_video[idx] = cv2.warpAffine(frame, M, (cols, rows), \r\n cv2.INTER_NEAREST|cv2.WARP_INVERSE_MAP, \r\n cv2.BORDER_CONSTANT).copy()\r\n\r\n marked = stab_video[idx].copy()\r\n for p in np.squeeze(good_cur):\r\n marked = cv2.circle(marked, tuple(p.astype(int)), 5, (255,0,0), 2)\r\n cv2.imshow('stab', marked)\r\n cv2.waitKey(0)\r\n\r\n\r\n\r\n # Update the previous frame and previous points\r\n old_gray = frame_gray.copy()\r\n p0 = good_cur.reshape(-1, 1, 2)\r\n cv2.destroyAllWindows()\r\n return stab_video", "def get_spoof_features2(img):\n roi = img # img_bgr[y:y+h, x:x+w]\n\n img_ycrcb = cv2.cvtColor(roi, cv2.COLOR_BGR2YCR_CB)\n img_luv = cv2.cvtColor(roi, cv2.COLOR_BGR2LUV)\n\n ycrcb_hist = calc_hist(img_ycrcb)\n luv_hist = calc_hist(img_luv)\n\n feature_vector = np.append(ycrcb_hist.ravel(), luv_hist.ravel())\n feature_vector = feature_vector.reshape(1, len(feature_vector))\n\n return feature_vector", "def find_tfl_lights(image: np.ndarray):\n kernel = np.array(\n [[0, 0, 0],\n [0, 0, 0],\n [0, 1, 0],\n [1, 3, 1],\n [0, 1, 0]])\n\n kernel = kernel - kernel.mean()\n\n red_image = image.copy()\n red_image = red_image[:, :, 0]\n _, red_image = cv2.threshold(red_image, 200, 255, cv2.THRESH_BINARY)\n output = cv2.filter2D(red_image, -1, kernel)\n output_copy = output.copy()\n output = ndimage.maximum_filter(output, size=30)\n output = output - output_copy\n mask = ((output == 0) & (output_copy > 0))\n red_points = np.where(mask)\n positions = []\n final_red_points = []\n for point1 in range(len(red_points[0])):\n point = (red_points[0][point1], red_points[1][point1])\n pixel = image[point[0], point[1]]\n if (pixel[1] < 170 or pixel[2] < 120) and pixel[0] >= 200:\n final_red_points.append(point)\n final_red_points = filter_points(final_red_points)\n positions += final_red_points\n auxilary = ['r'] * len(positions)\n red_x = [val[1] for val in final_red_points]\n red_y = [val[0] for val in final_red_points]\n green_image = image.copy()\n green_image = green_image[:, :, 1]\n _, green_image = cv2.threshold(green_image, 190, 255, cv2.THRESH_BINARY)\n output = cv2.filter2D(green_image, -1, kernel)\n output_copy = output.copy()\n output = ndimage.maximum_filter(output, size=30)\n output = output - output_copy\n mask = ((output == 0) & (output_copy > 0))\n green_points = np.where(mask)\n final_green_points = []\n for point1 in range(len(green_points[0])):\n point = (green_points[0][point1], green_points[1][point1])\n pixel = image[point[0], point[1]]\n if pixel[0] <= 180 and pixel[1] >= 220 and pixel[2] >= 160:\n final_green_points.append(point)\n\n final_green_points = filter_points(final_green_points)\n positions += final_green_points\n auxilary += ['g'] * len(final_green_points)\n green_x = [val[1] for val in final_green_points]\n green_y = [val[0] for val in final_green_points]\n print(f\"There are {len(green_x) + len(red_x)} points\")\n return positions, auxilary", "def mouseImage(event, x, y, flags, param):\n \n if event==cv.CV_EVENT_LBUTTONDOWN: #Clicked the left button\n print \"x, y are\", x, y\n (b,g,r) = D.image[y,x]\n print \"r,g,b is\", int(r), int(g), int(b)\n (h,s,v) = D.hsv[y,x]\n print \"h,s,v is\", int(h), int(s), int(v)\n D.down_coord = (x,y)\n D.mouse_down = True\n \n elif event==cv.CV_EVENT_LBUTTONUP: #Let go of the left button\n print \"x, y are\", x, y\n (b,g,r) = D.image[y,x]\n print \"r,g,b is\", int(r), int(g), int(b)\n (h,s,v) = D.hsv[y,x]\n print \"h,s,v is\", int(h), int(s), int(v)\n 
D.up_coord = (x,y)\n D.mouse_down = False\n\n if D.mode == \"clear\":\n D.sections = []\n else: #Start, add, or subtract -- put lower coordinates first\n x0, y0, x1, y1 = D.down_coord[0], D.down_coord[1], D.up_coord[0], D.up_coord[1]\n\n if x0 > x1:\n x0, x1 = x1, x0\n if y0 > y1:\n y0, y1 = y1, y0\n \n if D.mode == \"start\":\n D.sections = []\n mode_dict = {\"start\":'a', \"add\":'a', \"subtract\":'s'}\n D.sections.append([mode_dict[D.mode], (x0, y0), (x1, y1)])\n ImageProcessing.process_section(D)\n\n\n elif event == cv.CV_EVENT_RBUTTONDOWN: #Right click\n D.target_coord = (x, y)\n ImageProcessing.target_coord(D)\n\n\n elif D.mouse_down and event==cv.CV_EVENT_MOUSEMOVE: #Mouse just moved\n D.up_coord = (x,y)", "def test_stage_0():\n\tra_1 = readImage(TRAIN_RAW_IMAGE_1)\n\tre_1 = readImage(TRAIN_RESULT_IMAGE_1)\n\n\tra_2 = readImage(TRAIN_RAW_IMAGE_2)\n\tre_2 = readImage(TRAIN_RESULT_IMAGE_2)\n\n\t# Uncomment below if more examples are required.\n\t# ra_3 = readImage(TRAIN_RAW_IMAGE_3)\n\t# re_3 = readImage(TRAIN_RESULT_IMAGE_3)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_1),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_2 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_2),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_2)[0],\\\n\t# \t)\n\n\t# The prediction model is obtained and trained.\n\tengine = get_model((ra_1, ra_2,), (re_1, re_2,), model_type=SVM, percentage=0.1)\n\n\ttest_percentage = float(1) # how many tests\n\n\tra_1 = readImage(TEST_RAW_IMAGE_1)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TEST_RAW_IMAGE_1),\\\n\t# \t# k_means(TEST_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\tre_1 = readImage(TEST_RESULT_IMAGE_1)\n\n\t# ra_2 = readImage(TEST_RAW_IMAGE_2)\n\t# re_2 = readImage(TEST_RESULT_IMAGE_2)\n\n\tinput_vec = []\n\t# The features are extracted.\n\tinput_vec += buildFeatureArray_2(ra_1[0], ra_1[1], ra_1[2],\\\n\t\tRADIUS_ARRAY,\\\n\t\tadditional_feats=([] if len(ra_1) == 3 else ra_1[3:]))\n\n\tex_no = int(test_percentage * len(input_vec)) # actual number of the test sample\n\n\toutput_vec = []\n\toutput_vec += matrixToArray(re_1[0], lambda el: 1 if el == 255 else 0)\n\n\tprint('Will start predicting...')\n\n\tpredicted_vec = engine.predict(input_vec[:ex_no])\n\n\tcounter = float(0)\n\tfor y, p in zip(output_vec[:ex_no], predicted_vec[:ex_no]):\n\t\tif y == p: counter += 1\n\n\tprint('Accuracy: ' + str(counter/ex_no))\n\n\tpredicted_mat = arrayToMatrix( predicted_vec, len(re_1[0]), len(re_1[0][0]),\\\n\t\tlambda el: 255 if el == 1 else 0)\n\n\t# The predicted segmentation is saved.\n\tsave_rgb_img(\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t 'pred.bmp',\\\n\t)", "def dist_train(imgdir, imgfmt):\r\n global square_width, row, col, content\r\n counter, loop_ctr = 0, 1\r\n\r\n # termination criteria\r\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, square_width, 0.001)\r\n\r\n # arrays to store object points and image points from all the images\r\n objpoints = [] # 3d point in real world space\r\n imgpoints = [] # 2d points in image plane\r\n images = glob.glob(imgdir+'*'+imgfmt)\r\n images.sort()\r\n\r\n gray = np.zeros((0, 0))\r\n for fname in images:\r\n img = cv2.imread(fname)\r\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n objp = np.zeros((row*col, 3), np.float32)\r\n objp[:, :2] = 
np.mgrid[0:row, 0:col].T.reshape(-1, 2)\r\n ret, corners = cv2.findChessboardCorners(gray, (row, col), None)\r\n\r\n # if chessboard corners are found, add object points and image points to respective lists (after refining them)\r\n ind_line = fname.rindex(os.sep)\r\n ind_dot = fname.rindex('.')\r\n if ret == True:\r\n objpoints.append(objp)\r\n corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)\r\n imgpoints.append(corners2)\r\n # Draw and display the corners\r\n img = cv2.drawChessboardCorners(img, (row, col), corners2, ret)\r\n counter += 1\r\n save_image(fname[0:ind_line+1] + \"success/\", fname[ind_line+1:ind_dot],img)\r\n else:\r\n save_image(fname[0:ind_line + 1] + \"fail/\", fname[ind_line + 1:ind_dot], img)\r\n cv2.destroyAllWindows()\r\n loop_ctr += 1\r\n\r\n if len(images) > 0:\r\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)\r\n # get the calibration error\r\n error = calib_error(objpoints, imgpoints, rvecs, tvecs, mtx, dist)\r\n content += imgdir + ':\\n Number of qualified images = ' + str(counter) + '\\n Average calibration error = ' + str(np.round(error, 4)) + ' pixels\\n'\r\n return ret, mtx, dist, rvecs, tvecs, error\r\n else:\r\n content += imgdir + ':\\n No images in this folder. Cannot compute camera parameters.\\n'\r\n return 0, 0, 0, 0, 0, float('inf')", "def test(self, img_path):\n import cv2 \n\n self.load_data_test(path=img_path)\n self.C.horizontal_flips = False\n self.C.vertical_flips = False\n self.C.rotate_90 = False\n\n st = time.time()\n\n from .utils.data_generators import format_img_size\n from .utils.data_generators import format_img_channels\n from .utils.data_generators import format_img\n from .utils.data_generators import get_real_coordinates\n\n if self.cnn_name == 'vgg16' or self.cnn_name == 'vgg19':\n num_feature = 512\n else:\n num_feature = 1024 # any other convNet\n \n input_shape_img = (None, None, 3)\n input_shape_features = (None, None, num_feature)\n\n img_input = Input(shape=input_shape_img)\n roi_input = Input(shape=(self.C.num_roi, 4))\n feature_map_input = Input(shape=input_shape_features)\n\n # define the base network\n shared_layers = self.cnn_model.nn_base(img_input, trainable=True)\n\n # define the RPN, built on the base layers\n num_anchors = len(self.C.anchor_scales) * len(self.C.anchor_ratios)\n rpn_layers = self.region_proposal_net(shared_layers, num_anchors)\n classifier = self.classifier(feature_map_input, \n self.cnn_model.classifier_layers, \n roi_input, \n self.C.num_roi, \n num_class=len(self.class_mapping), \n trainable=True)\n\n model_rpn = Model(img_input, rpn_layers)\n model_classifier_only = Model([feature_map_input, roi_input], classifier)\n model_classifier = Model([feature_map_input, roi_input], classifier)\n\n print('Loading weights from {}'.format(self.C.model_path))\n model_rpn.load_weights(self.C.model_path, by_name=True)\n model_classifier.load_weights(self.C.model_path, by_name=True)\n\n model_rpn.compile(optimizer='sgd', loss='mse')\n model_classifier.compile(optimizer='sgd', loss='mse')\n\n for i in range(len(self.test_images)):\n img = cv2.imread(self.test_images[i])\n X, ratio = format_img(img, self.C)\n X = np.transpose(X, (0, 2, 3, 1))\n\n # get the feature maps and output from the RPN\n [Y1, Y2, F] = model_rpn.predict(X)\n\n R = roi_helpers.rpn_to_roi(Y1, Y2, self.C, K.image_data_format(), overlap_thresh=0.7)\n\n # convert from (x1,y1,x2,y2) to (x,y,w,h)\n R[:, 2] -= R[:, 0]\n R[:, 3] -= R[:, 1]\n\n # apply the spatial pyramid 
pooling to the proposed regions\n bboxes = {}\n probs = {}\n\n for jk in range(R.shape[0] // self.C.num_roi+1):\n ROIs = np.expand_dims(R[self.C.num_roi*jk:self.C.num_roi*(jk+1), :], axis=0)\n if ROIs.shape[1] == 0:\n break\n\n if jk == R.shape[0] // self.C.num_roi:\n # pad R\n curr_shape = ROIs.shape\n target_shape = (curr_shape[0], self.C.num_roi, curr_shape[2])\n ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype)\n ROIs_padded[:, :curr_shape[1], :] = ROIs\n ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :]\n ROIs = ROIs_padded\n\n [P_cls, P_regr] = model_classifier_only.predict([F, ROIs])\n\n for ii in range(P_cls.shape[1]):\n if np.max(P_cls[0, ii, :]) < self.C.bbox_threshold or \\\n np.argmax(P_cls[0, ii, :]) == (P_cls.shape[2] - 1):\n continue\n\n cls_name = self.class_mapping[np.argmax(P_cls[0, ii, :])]\n if cls_name not in bboxes:\n bboxes[cls_name] = []\n probs[cls_name] = []\n\n (x, y, w, h) = ROIs[0, ii, :]\n cls_num = np.argmax(P_cls[0, ii, :])\n try:\n (tx, ty, tw, th) = P_regr[0, ii, 4*cls_num:4*(cls_num+1)]\n tx /= C.class_regress_std[0]\n ty /= C.class_regress_std[1]\n tw /= C.class_regress_std[2]\n th /= C.class_regress_std[3]\n x, y, w, h = roi_helpers.apply_regr(x, y, w, h, tx, ty, tw, th)\n except:\n pass\n \n bboxes[cls_name].append([self.C.stride*x, \n self.C.stride*y, \n self.C.stride*(x+w), \n self.C.stride*(y+h)])\n probs[cls_name].append(np.max(P_cls[0, ii, :]))\n\n all_detections = []\n\n for key in bboxes:\n bbox = np.array(bboxes[key])\n new_boxes, new_probs = roi_helpers.non_max_suppression_fast(bbox, \n np.array(probs[key]), overlap_thresh=0.5)\n \n for jk in range(new_boxes.shape[0]):\n (x1, y1, x2, y2) = new_boxes[jk,:]\n (real_x1, real_y1, real_x2, real_y2) = get_real_coordinates(ratio, x1, y1, x2, y2)\n\n cv2.rectangle(img,(real_x1, real_y1), \n (real_x2, real_y2), \n (int(self.class_to_color[key][0]), \n int(self.class_to_color[key][1]), \n int(self.class_to_color[key][2])),\n 2)\n\n textLabel = '%s: %.3f' % (key, new_probs[jk])\n all_detections.append((key, new_probs[jk]))\n\n (retval,baseLine) = cv2.getTextSize(textLabel, cv2.FONT_HERSHEY_COMPLEX, 1, 1)\n text_org = (real_x1+10, real_y1+20)\n\n cv2.rectangle(img, (text_org[0], text_org[1]+baseLine), \n (text_org[0]+retval[0]+10, text_org[1]-retval[1]-10), \n (0, 0, 0), 2)\n cv2.rectangle(img, (text_org[0],text_org[1]+baseLine), \n (text_org[0]+retval[0]+10, text_org[1]-retval[1]-10), \n (255, 255, 255), -1)\n cv2.putText(img, textLabel, text_org, cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1)\n\n print('Elapsed time = {}'.format(time.time() - st))\n print(self.test_images[i], all_detections)\n if all_detections:\n cv2.imwrite(self.test_images_bbox[i], img)", "def loop_and_detect(cam, runtime, trt_yolov3, conf_th, vis):\n\n while True:\n if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:\n break\n timer = cv2.getTickCount()\n img = cam.read().copy()\n if img is not None:\n if runtime:\n boxes, confs, label, _preprocess_time, _postprocess_time,_network_time = trt_yolov3.detect(img, conf_th)\n img, _visualize_time = vis.draw_bboxes(img, boxes, confs, label)\n time_stamp = record_time(_preprocess_time, _postprocess_time, _network_time, _visualize_time)\n show_runtime(time_stamp)\n else:\n boxes, confs, label, _, _, _ = trt_yolov3.detect(img, conf_th)\n img, _ = vis.draw_bboxes(img, boxes, confs, label)\n \n fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)\n img = show_fps(img, fps)\n cv2.imshow(WINDOW_NAME, img)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break", "def __init__(self):\n self.active = 
True # Camera activation control\n self.stream = cv2.VideoCapture(0) # Open video stream\n while not self.stream.isOpened():\n pass\n _,self.image = self.stream.read()# Save the first frame\n cv2.waitKey(10)\n self.frame = self.image[196:304,:546,:]# Cropped frame\n self.diff_frame = self.frame\n# self.reference_frame = copy.deepcopy(self.frame)\n# self.abs_diff_frame = copy.deepcopy(self.frame)\n self.reference_frame = self.frame\n self.abs_diff_frame = self.frame\n self.frame_count = 1 # Used for framerate estimation\n self.frame_rate = 0\n self.tic = time()", "def detectBall():\n\t\n\tglobal np_arr\n\timage_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR) # OpenCV >= 3.0:\n\n\tblackLower = (0, 0, 0) \n\tblackUpper = (5,50,50)\n\tredLower = (0, 50, 50)\n\tredUpper = (5, 255, 255)\n\tyellowLower = (25, 50, 50) \n\tyellowUpper = (35, 255, 255)\n\tgreenLower = (50, 50, 50) \n\tgreenUpper = (70, 255, 255)\n\tblueLower = (100, 50, 50) \n\tblueUpper = (130, 255, 255)\n\tmagentaLower = (125, 50, 50) \n\tmagentaUpper = (150, 255, 255)\n\n blurred = cv2.GaussianBlur(image_np, (11, 11), 0)\n hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)\n\n\tmask_blk = cv2.inRange(hsv, blackLower, blackUpper)\n mask_blk = cv2.erode(mask_blk, None, iterations=2)\n mask_blk = cv2.dilate(mask_blk, None, iterations=2)\n\n\tmask_r = cv2.inRange(hsv, redLower, redUpper)\n mask_r = cv2.erode(mask_r, None, iterations=2)\n mask_r = cv2.dilate(mask_r, None, iterations=2)\n\n\tmask_y = cv2.inRange(hsv, yellowLower, yellowUpper)\n mask_y = cv2.erode(mask_y, None, iterations=2)\n mask_y = cv2.dilate(mask_y, None, iterations=2)\n\n\tmask_g = cv2.inRange(hsv, greenLower, greenUpper)\n mask_g = cv2.erode(mask_g, None, iterations=2)\n mask_g = cv2.dilate(mask_g, None, iterations=2)\n\n mask_blu = cv2.inRange(hsv, blueLower, blueUpper)\n mask_blu = cv2.erode(mask_blu, None, iterations=2)\n mask_blu = cv2.dilate(mask_blu, None, iterations=2)\n\n\tmask_m = cv2.inRange(hsv, magentaLower, magentaUpper)\n mask_m = cv2.erode(mask_m, None, iterations=2)\n mask_m = cv2.dilate(mask_m, None, iterations=2)\n #cv2.imshow('mask', mask)\n\n cnts_blk = cv2.findContours(mask_blk.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n\tcnts_r = cv2.findContours(mask_r.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n\tcnts_y = cv2.findContours(mask_y.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n\tcnts_g = cv2.findContours(mask_g.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n\tcnts_blu = cv2.findContours(mask_blu.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n\tcnts_m = cv2.findContours(mask_m.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n\n cnts_blk = imutils.grab_contours(cnts_blk)\n\tcnts_r = imutils.grab_contours(cnts_r)\n\tcnts_y = imutils.grab_contours(cnts_y)\n\tcnts_g = imutils.grab_contours(cnts_g)\n\tcnts_blu = imutils.grab_contours(cnts_blu)\n\tcnts_m = imutils.grab_contours(cnts_m)\n\n center = None\n\tc = 0\n\tradius = 0\n\n\tglobal black_ball, red_ball, yellow_ball, green_ball, blue_ball, magenta_ball\n\n # only proceed if at least one contour was found\n if len(cnts_blk) > 0 and black_ball.detected_flag != True:\n c = max(cnts_blk, key=cv2.contourArea)\n\t ((x, y), radius) = cv2.minEnclosingCircle(c)\n\t M = cv2.moments(c)\n\t center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\t cv2.circle(image_np, (int(x), int(y)), int(radius), (0, 255, 255), 2)\n\t cv2.circle(image_np, center, 5, (0, 0, 255), -1)\n\t black_ball = track(radius, center, black_ball)\n\t 
pub_blackBall.publish(black_ball)\n\t print (\"Black ball detected.\\n\")\n\n\telif len(cnts_r) > 0 and red_ball.detected_flag != True:\n c = max(cnts_r, key=cv2.contourArea)\n\t ((x, y), radius) = cv2.minEnclosingCircle(c)\n\t M = cv2.moments(c)\n\t center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\t cv2.circle(image_np, (int(x), int(y)), int(radius), (0, 255, 255), 2)\n\t cv2.circle(image_np, center, 5, (0, 0, 255), -1)\n\t red_ball = track(radius, center, red_ball)\n\t pub_redBall.publish(red_ball)\n\t print (\"Red ball detected.\\n\")\n\t \n\telif len(cnts_y) > 0 and yellow_ball.detected_flag != True:\n c = max(cnts_y, key=cv2.contourArea)\n\t ((x, y), radius) = cv2.minEnclosingCircle(c)\n\t M = cv2.moments(c)\n\t center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\t cv2.circle(image_np, (int(x), int(y)), int(radius), (0, 255, 255), 2)\n\t cv2.circle(image_np, center, 5, (0, 0, 255), -1)\n\t yellow_ball = track(radius, center, yellow_ball)\n\t pub_yellowBall.publish(yellow_ball)\n\t print (\"Yellow ball detected.\\n\")\n\n\telif len(cnts_g) > 0 and green_ball.detected_flag != True:\n c = max(cnts_g, key=cv2.contourArea)\n\t ((x, y), radius) = cv2.minEnclosingCircle(c)\n\t M = cv2.moments(c)\n\t center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\t cv2.circle(image_np, (int(x), int(y)), int(radius), (0, 255, 255), 2)\n\t cv2.circle(image_np, center, 5, (0, 0, 255), -1)\n\t green_ball = track(radius, center, green_ball)\n\t pub_greenBall.publish(green_ball)\n\t print (\"Green ball detected.\\n\")\n\n\telif len(cnts_blu) > 0 and blue_ball.detected_flag != True:\n c = max(cnts_blu, key=cv2.contourArea)\n\t ((x, y), radius) = cv2.minEnclosingCircle(c)\n\t M = cv2.moments(c)\n\t center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\t cv2.circle(image_np, (int(x), int(y)), int(radius), (0, 255, 255), 2)\n\t cv2.circle(image_np, center, 5, (0, 0, 255), -1)\n\t blue_ball = track(radius, center, blue_ball)\n\t pub_blueBall.publish(blue_ball)\n\t print (\"Blue ball detected.\\n\")\n\n\telif len(cnts_m) > 0 and magenta_ball.detected_flag != True:\n c = max(cnts_m, key=cv2.contourArea)\n\t ((x, y), radius) = cv2.minEnclosingCircle(c)\n\t M = cv2.moments(c)\n\t center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\t cv2.circle(image_np, (int(x), int(y)), int(radius), (0, 255, 255), 2)\n\t cv2.circle(image_np, center, 5, (0, 0, 255), -1)\n\t magenta_ball = track(radius, center, magenta_ball)\n\t pub_magentaBall.publish(magenta_ball)\n\t print (\"Magenta ball detected.\\n\")\n\n\tcv2.imshow('window', image_np)\n\tcv2.waitKey(2)\n\tc = 0", "def get_classification(self, image, wp = 0):\n\n cv2_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) \n input_image = np.expand_dims(cv2_image, axis=0)\n (boxes, scores, classes) = self.sess.run([self.boxes, self.scores, self.classes], \n feed_dict={self.image_tensor: input_image})\n\n prediction = 4\n min_score_thresh=.6\n sq_boxes = np.squeeze(boxes)\n sq_classes = np.squeeze(classes).astype(np.int32)\n sq_scores = np.squeeze(scores)\n\n for i in range(sq_boxes.shape[0]):\n if sq_scores is None or sq_scores[i] > min_score_thresh:\n prediction = sq_classes[i]\n min_score_thresh = sq_scores[i]\n print(\"Found traffic light: {i:%d prediction:%s pred_score:%.4f}\"%(i, prediction, sq_scores[i]))\n \n if prediction == 1:\n return TrafficLight.RED\n elif prediction == 2:\n return TrafficLight.YELLOW\n elif prediction == 3:\n return TrafficLight.GREEN\n return 
TrafficLight.UNKNOWN", "def example():\n\n log.info(\"LFW prediction example\")\n\n class_id_to_label_mapping, label_to_class_id_mapping = load_class_id_to_label_mapping(\"lfw\")\n classes = len(class_id_to_label_mapping.keys())\n\n with Predictor(\"lfw\", (250, 250, 3), classes) as p:\n # Predict the first image\n colin_image = Path(LFW_DIR) / Path(\"Colin_Powell/Colin_Powell_0023.jpg\")\n image = cv.imread(str(colin_image))\n image = cv.cvtColor(image, cv.COLOR_BGR2RGB)\n x_test = image.reshape(1, image.shape[0], image.shape[1], image.shape[2])\n\n predicted_class_id = p.predict(x_test)\n predicted_class_id = int(np.squeeze(predicted_class_id))\n\n log.info(\"Prediction: %s\" % (class_id_to_label_mapping[str(predicted_class_id)]))\n\n # Predict second image\n colin_image2 = Path(LFW_DIR) / Path(\"Colin_Powell/Colin_Powell_0024.jpg\")\n image = cv.imread(str(colin_image2))\n image = cv.cvtColor(image, cv.COLOR_BGR2RGB)\n x_test = image.reshape(1, image.shape[0], image.shape[1], image.shape[2])\n\n predicted_class_id = p.predict(x_test)\n predicted_class_id = int(np.squeeze(predicted_class_id))\n\n log.info(\"Prediction: %s\" % (class_id_to_label_mapping[str(predicted_class_id)]))\n\n with Predictor(\"lfw\", (250, 250, 3), classes) as p:\n # Predict the third image in a different session\n colin_image2 = Path(LFW_DIR) / Path(\"Colin_Powell/Colin_Powell_0025.jpg\")\n image = cv.imread(str(colin_image2))\n image = cv.cvtColor(image, cv.COLOR_BGR2RGB)\n x_test = image.reshape(1, image.shape[0], image.shape[1], image.shape[2])\n\n predicted_class_id = p.predict(x_test)\n predicted_class_id = int(np.squeeze(predicted_class_id))\n\n log.info(\"Prediction: %s\" % (class_id_to_label_mapping[str(predicted_class_id)]))", "def image_pre_filtering(left_img: np.ndarray, right_img: np.ndarray) -> tuple:\n\n def clahe(image: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply Contrast Limited Adaptive Histogram Equalization\n :param image: the image to be filtered\n :return: the image filtered with CLAHE\n \"\"\"\n clahe_filter = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n return clahe_filter.apply(image)\n\n def logarithmic(image: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply Logarithmic Transform\n :param image: the image to be filtered\n :return: the image filtered with logarithmic transform\n \"\"\"\n c = max_disparity / math.log(1 + np.max(image))\n sigma = 1\n for i in range(0, image.shape[1]): # image width\n for j in range(0, image.shape[0]): # image height\n # compute logarithmic transform\n image[j, i] = int(c * math.log(1 + ((math.exp(sigma) - 1) * image[j, i])))\n return image\n\n def exponential(image: np.ndarray) -> np.ndarray:\n \"\"\"\n Perform pre-processing - raise to the power, as this subjectively appears\n to improve subsequent disparity calculation\n :param image:\n :return:\n \"\"\"\n return np.power(image, 0.75).astype('uint8')\n\n def apply_filter(image: np.ndarray) -> np.ndarray:\n \"\"\"\n Choose which filter to apply to both images, this could be a combination too\n :param image: the image to be filtered\n :return:\n \"\"\"\n # choose filters to apply\n return clahe(image)\n\n return apply_filter(left_img), apply_filter(right_img)", "def object_detection(): # needs to be modified so definition can be called as part of main function\r\n green_lower = (29, 86, 6) # define the lower boundaries of the \"green\"\r\n green_upper = (64, 255, 255) # define the upper boundaries of the \"green\"\r\n pts = deque(maxlen=args[\"buffer\"]) # ball in the HSV color space, then initialize the 
list of tracked points\r\n\r\n if not args.get(\"video\", False): # if a video path was not supplied, grab the reference to the picam\r\n vs = VideoStream(usePiCamera=args[\"picamera\"] > 0).start()\r\n else: # otherwise, grab a reference to the video file\r\n vs = cv2.VideoCapture(args[\"video\"])\r\n time.sleep(2.0) # allow the camera or video file to warm up\r\n while True: # keep looping\r\n frame = vs.read() # grab the current frame\r\n frame = frame[1] if args.get(\"video\", False) else frame # handle the frame from VideoCapture or VideoStream\r\n if frame is None: # if viewing video and did not grab frame, then reached end of video\r\n break\r\n frame = imutils.resize(frame, width=600) # resize the frame\r\n blurred = cv2.GaussianBlur(frame, (11, 11), 0) # blur it\r\n hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV) # and convert it to the HSV color space\r\n\r\n mask = cv2.inRange(hsv, green_lower, green_upper) # construct a mask for the color \"green\"\r\n mask = cv2.erode(mask, None, iterations=2) # then perform a series of erosions\r\n mask = cv2.dilate(mask, None, iterations=2) # and dilations to remove any small blobs left in the mask\r\n\r\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,\r\n cv2.CHAIN_APPROX_SIMPLE) # find contours in the mask\r\n cnts = imutils.grab_contours(cnts)\r\n center = None # and initialize the current (x, y) center of the ball\r\n\r\n if len(cnts) > 0: # only proceed if at least one contour was found\r\n c = max(cnts, key=cv2.contourArea) # find the largest contour in the mask\r\n ((x, y), radius) = cv2.minEnclosingCircle(c) # then use it to compute minimum enclosing circle and centroid\r\n M = cv2.moments(c) # calculate moments\r\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"])) # use moment to find centroid in x,y\r\n if radius > 10: # only proceed if the radius meets a minimum size\r\n cv2.circle(frame, (int(x), int(y)), int(radius),\r\n (0, 255, 255), 2) # draw the circle\r\n cv2.circle(frame, center, 5, (0, 0, 255), -1) # draw the centroid\r\n object_tracking(int(x), int(y)) # update the list of tracked points\r\n\r\n pts.appendleft(center) # update the points queue\r\n for i in range(1, len(pts)): # loop over the set of tracked points\r\n if pts[i - 1] is None or pts[i] is None: # if either of the tracked points are None, ignore them\r\n continue\r\n thickness = int(np.sqrt(args[\"buffer\"] / float(i + 1)) * 2.5) # otherwise, compute thickness of line\r\n cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness) # draw the connecting lines\r\n\r\n cv2.imshow(\"Frame\", frame) # show the frame to our screen\r\n key = cv2.waitKey(1) & 0xFF\r\n if key == ord(\"q\"): # if the 'q' key is pressed, stop the loop\r\n break\r\n\r\n if not args.get(\"video\", False): # if we are not using a video file, stop the camera video stream\r\n vs.stop()\r\n else: # otherwise, release the camera\r\n vs.release()\r\n cv2.destroyAllWindows() # close all windows\r", "def main():\n import sys\n if len(sys.argv) < 3:\n print(\"Please list database and sobel threshold.\")\n return\n import numpy as np\n import cv2\n import glob\n import age_class\n # store all image paths in list\n img_list = glob.glob(sys.argv[1] + '*')\n # threshold for edge detection\n sobel_thres = int(sys.argv[2])\n feature_list = []\n for i in img_list:\n print(i)\n if i != \"face_dataset/.jpg\":\n img_color = cv2.imread(i)\n img_color = cv2.resize(img_color, (150, 200))\n height, width = 200, 150\n img_gray = cv2.cvtColor(img_color, cv2.COLOR_BGR2GRAY)\n img_gray = 
age_class.dynamic_range(img_gray)\n\n # location phase\n locs = age_class.location_phase(img_gray.copy(), sobel_thres)\n eye_pos, left_eye, right_eye, nose_pos, mouth_pos, mouth_area = locs\n # feature extraction phase\n feats = age_class.feature_extraction(img_gray.copy(), eye_pos, \n left_eye, right_eye, nose_pos, mouth_pos, mouth_area, 40)\n # place all feature data into feature matrix\n l = []\n for n in range(len(feats[0])):\n for key in feats[0][n].keys():\n l.append(feats[0][n][key])\n l.append(feats[1])\n l.append(feats[2])\n feature_list.append(l)\n\n feature_list = np.array(feature_list)\n print(feature_list.shape)", "def LKTrackerImageToImage(imageOld, pixelCoordsOld, imageNew,\n pixelCoordsNew, windowSize):\n # imageOld = cv2.cvtColor(imageOld, cv2.COLOR_BGR2GRAY)\n # imageNew = cv2.cvtColor(imageNew, cv2.COLOR_BGR2GRAY)\n\n # Get top left corner of window.\n\n topLeftX1, topLeftY1 = pixelCoordsOld - windowSize // 2\n topLeftX2, topLeftY2 = pixelCoordsNew - windowSize // 2\n\n # Compute horizontal and vertical gradients for the original frame.\n gx = utils.pixelDiffImages(imageOld,\n topLeftX1,\n topLeftY1,\n imageOld,\n topLeftX1,\n topLeftY1 - 1,\n windowSize,\n windowSize)\n \n gy = utils.pixelDiffImages(imageOld,\n topLeftX1,\n topLeftY1,\n imageOld,\n topLeftX1 - 1,\n topLeftY1,\n windowSize,\n windowSize)\n \n # Compute difference between original and new frames.\n diff = utils.pixelDiffImages(imageOld,\n topLeftX1,\n topLeftY1,\n imageNew,\n topLeftX2,\n topLeftY2,\n windowSize,\n windowSize)\n\n # Compute components of Harris matrix.\n Ixx = gx ** 2\n Iyy = gy ** 2\n Ixy = gx * gy\n\n # Compute Gaussian kernel for weighting pixels in the window.\n gkern = np.outer(signal.gaussian(windowSize, 2.5),\n signal.gaussian(windowSize, 2.5))\n\n # Construct matrices and solve the matrix-vector equation to get the\n # movement of the pixel.\n Z = np.array([[np.sum(Ixx * gkern), np.sum(Ixy * gkern)],\n [np.sum(Ixy * gkern), np.sum(Iyy * gkern)]])\n b = np.array([np.sum(diff * gx * gkern), np.sum(diff * gy * gkern)])\n d = np.linalg.solve(Z, b)\n\n # Compute new position of pixel\n return pixelCoordsNew + d[: : -1]", "def gain_box_score(im, preds):\n if len(preds[0]) == 0:\n cv2.imshow(\"Video detection\", im)\n else:\n for pred in preds:\n for i, box_label in enumerate(zip( pred[\"boxes\"], pred[\"labels\"] )):\n box, label = box_label\n xmin, ymin, xmax, ymax = box\n#-------------------- Create a Rectangle patch ----------------------- \n if label==1:\n class_name='with_mask'\n color = (0, 255, 0)\n elif label==2:\n class_name='without_mask'\n color = (0, 0, 255)\n elif label==3:\n class_name='mask_worn_improperly'\n color = (255, 255 ,0)\n score = pred['scores'][i]\n#--------------------- Bounding Box painting -------------------------- \n if score > 0.65:\n cv2.rectangle(im, (xmin, ymin), (xmax, ymax), color, 1) \n cv2.putText(im, str(class_name)+str(round(score.item(),2)), (xmin,int(ymax-ymax/20)),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1) #print class name\n cv2.imshow(\"Video detection\",im)\n print('*****', 'Bbox:', i , '*****' )\n print('Class: ', str(class_name))\n print('Scores: ', str(round(score.item(),2)))\n print('boxes: ',f'{int(xmin)}, {int(ymin)}, {int(xmax)}, {int(ymax)}')\n print('image shape: ', im.shape) \n else:\n cv2.imshow(\"Video detection\", im)\n print('********************','\\n')", "def detect_and_display(self, frame):\n frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n frame_gray = cv.equalizeHist(frame_gray)\n # -- Detect faces\n faces = 
self.face_cascade.detectMultiScale(frame_gray)\n center = (self.image_width / 2.0, self.image_height / 2.0)\n for (x, y, w, h) in faces:\n center = (x + w // 2, y + h // 2)\n frame_gray = cv.ellipse(frame_gray, center, (w // 2, h // 2), 0, 0, 360, (255, 0, 255), 4)\n # faceROI = frame_gray[y:y + h, x:x + w]\n # # -- In each face, detect eyes\n # eyes = eyes_cascade.detectMultiScale(faceROI)\n # for (x2, y2, w2, h2) in eyes:\n # eye_center = (x + x2 + w2 // 2, y + y2 + h2 // 2)\n # radius = int(round((w2 + h2) * 0.25))\n # frame = cv.circle(frame, eye_center, radius, (255, 0, 0), 4)\n break\n ros_image = self.bridge.cv2_to_imgmsg(frame_gray, \"8UC1\")\n self.ros_pub_image.publish(ros_image)\n error_x = center[0] - self.image_width / 2.0\n error_y = center[1] - self.image_height / 2.0\n\n return error_x, error_y", "def demo(sess, net, image_name):\n\n # Load the demo image\n im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)\n #im_file = os.path.join('/home/corgi/Lab/label/pos_frame/ACCV/training/000001/',image_name)\n im = cv2.imread(im_file)\n\n # Detect all object classes and regress object bounds\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(sess, net, im)\n timer.toc()\n print (('Detection took {:.3f}s for '\n '{:d} object proposals').format(timer.total_time, boxes.shape[0]))\n\n # Visualize detections for each class\n im = im[:, :, (2, 1, 0)]\n fig, ax = plt.subplots(figsize=(12, 12))\n ax.imshow(im, aspect='equal')\n\n CONF_THRESH = 0.8\n NMS_THRESH = 0.3\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # because we skipped background\n cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n vis_detections(im, cls, dets, ax, thresh=CONF_THRESH)", "def main(path,path_save_image):\n transformations={'v':[cv2.flip,1],\n 'h':[cv2.flip,0],\n 'c':[cv2.rotate,cv2.ROTATE_90_CLOCKWISE],\n 'cc':[cv2.rotate,cv2.ROTATE_90_COUNTERCLOCKWISE]}\n\n files=os.listdir(path)\n \n name_images=[file for file in files if \n (file.endswith('.jpg') or \n file.endswith('.png') or \n file.endswith('.JPG') or\n file.endswith('.PNG'))]\n\n for index,name in enumerate(name_images):\n print('****************************')\n print(index)\n image = path+'/'+name\n print(name)\n imgcv_p = cv2.imread(image)\n #RGB_img = cv2.cvtColor(imgcv_p, cv2.COLOR_BGR2RGB)\n plt.figure(figsize=(10,7))\n plt.imshow(imgcv_p), plt.axis(\"on\")\n plt.show()\n cv2.waitKey(0)\n plt.close()\n\n list_transform = input(\"transformations : \").split(\" \")\n if list_transform[0]!=\"n\":\n for transform in list_transform:\n imgcv_p=transformations.get(transform)[0](\n imgcv_p,transformations.get(transform)[1])\n cv2.imwrite(path_save_image+\"/\"+name, imgcv_p)", "def test_affine():\n root_dir = \"/home/sdb/wangshentao/myspace/thesis/data/VisDrone2019-MOT-test-dev/\"\n seq_dir = root_dir + \"sequences/\"\n affine_dir = root_dir + \"affine_orig_v2/\"\n MIN_MATCH_COUNT = 10\n # 1088 is more accurate\n seqs_sample = '''\n uav0000249_00001_v\n uav0000249_02688_v\n '''\n seqs_str = seqs_sample\n seqs = [seq.strip() for seq in seqs_str.split()]\n for seq in seqs:\n print(seq)\n with open(os.path.join(affine_dir, seq+'.pickle'), 'rb') as fin:\n affine_dict = pickle.load(fin)\n seq_files = os.listdir(os.path.join(seq_dir, seq))\n seq_files = sorted(seq_files, key=lambda x: int(x[:-4]))\n for i in range(34, len(seq_files)-1):\n frame_name = 
\"{:07d}.jpg\".format(i)\n M = affine_dict[frame_name]\n print(i)\n image0 = cv2.imread(os.path.join(seq_dir, seq, seq_files[i]))\n image1 = cv2.imread(os.path.join(seq_dir, seq, seq_files[i+1]))\n image0 = cv2.cvtColor(image0, cv2.COLOR_BGR2GRAY)\n image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)\n # surf = cv2.xfeatures2d.SURF_create()\n # kp0, des0 = surf.detectAndCompute(image0, None)\n # kp1, des1 = surf.detectAndCompute(image1, None)\n # FLANN_INDEX_KDTREE = 0\n # index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)\n # search_params = dict(checks=10)\n #\n # flann = cv2.FlannBasedMatcher(index_params, search_params)\n # matchs = flann.knnMatch(des0, des1, k=2)\n #\n # # store all the good matchs as per Lowe's ratio test\n # good = []\n # for m, n in matchs:\n # if m.distance < 0.7 * n.distance:\n # good.append(m)\n # if len(good) > MIN_MATCH_COUNT:\n # src_pts = np.float32([kp0[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)\n # dst_pts = np.float32([kp1[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)\n # M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)\n # else:\n # M = np.eye(3, 3)\n\n image0_transform = cv2.warpPerspective(image0, M, (image0.shape[1], image0.shape[0]))\n bbox = np.array([540, 540, 600, 1079])\n bbox_expand = np.ones((3, 2))\n bbox_expand[:2, 0] = bbox[:2]\n bbox_expand[:2, 1] = bbox[2:]\n bbox_expand = np.dot(M, bbox_expand)\n bbox_transform = np.concatenate([bbox_expand[:2, 0], bbox_expand[:2, 1]])\n bbox_transform = bbox_transform.astype(np.uint64)\n\n # show the images\n plt.figure(i, figsize=(16, 9))\n plt.subplot(2, 2, 1)\n cv2.rectangle(image0, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 2)\n cv2.rectangle(image0, (bbox_transform[0], bbox_transform[1]), (bbox_transform[2], bbox_transform[3]),\n (0, 0, 255), 2)\n plt.imshow(image0)\n plt.subplot(2, 2, 2)\n cv2.rectangle(image1, (bbox_transform[0], bbox_transform[1]), (bbox_transform[2], bbox_transform[3]),\n (0, 255, 0), 2)\n plt.imshow(image1)\n plt.subplot(2, 2, 3)\n cv2.rectangle(image0_transform, (bbox_transform[0], bbox_transform[1]), (bbox_transform[2], bbox_transform[3]),\n (0, 255, 0), 2)\n plt.imshow(image0_transform)\n plt.show()", "def _check_calibration():\n image_list = glob.glob(os.path.join(\"C:\\\\Users\\\\chuyangl\\\\Desktop\\\\liushuai\\\\calibrator\\\\board\\\\left\", \"*.bmp\"))\n for single_img in image_list:\n image = cv2.imread(single_img)\n new_image = un_distort_image(image)\n cv2.imshow('before', cv2.resize(image, (int(image.shape[1] * 0.7), int(image.shape[0] * 0.7))))\n cv2.imshow('after', cv2.resize(new_image, (int(new_image.shape[1] * 0.7), int(new_image.shape[0] * 0.7))))\n cv2.waitKey(0)\n\n image = cv2.imread(image_list[0])\n\n # distortion_points = [ge.Point(110, 437), ge.Point(932, 151), ge.Point(1034, 331)]\n # calibration_points = [ge.Point(510, 437), ge.Point(832, 151), ge.Point(1134, 331)]\n\n distortion_points = [ge.Point(110, 437), ge.Point(632, 151), ge.Point(333, 331)]\n calibration_points = [ge.Point(510, 437), ge.Point(532, 151), ge.Point(234, 331)]\n\n for p in distortion_points:\n cv2.circle(image, p.tuple(), 23, (0, 0, 255), 2)\n\n new_image = un_distort_image(image)\n\n for p in calibration_points:\n cv2.circle(new_image, p.tuple(), 23, (255, 0, 0), 4)\n p2 = distort_point(p)\n p3 = un_distort_point(p2)\n cv2.circle(image, p2.int().tuple(), 23, (0, 255, 255), 4)\n cv2.circle(new_image, p3.int().tuple(), 23, (0, 0, 255), 4)\n print(p.int().tuple(), p2.int().tuple(), p3.int().tuple())\n\n for p in distortion_points:\n p2 = 
un_distort_point(p)\n p3 = distort_point(p2)\n cv2.circle(new_image, p2.int().tuple(), 23, (0, 255, 255), 2)\n cv2.circle(image, p3.int().tuple(), 23, (0, 255, 255), 2)\n print(p.int().tuple(), p2.int().tuple(), p3.int().tuple())\n\n cv2.imshow('before', cv2.resize(image, (int(image.shape[1] * 0.7), int(image.shape[0] * 0.7))))\n cv2.imshow('after', cv2.resize(new_image, (int(new_image.shape[1] * 0.7), int(new_image.shape[0] * 0.7))))\n\n cv2.waitKey(0)", "def yolo_object_detection(image_filename, net, confidence, threshold, labels, colors):\n # read image file\n # image is an array of image data (row, column, channel)\n image = cv2.imread(image_filename)\n (H, W) = image.shape[:2]\n\n # preprocess image data with rescaling and resizing to fit YOLO input shape\n # OpenCV assumes BGR images: we have to convert to RGB, with swapRB=True\n blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n\n # set a new input to the network\n net.setInput(blob)\n\n # get YOLOv3's output layer names\n ln = net.getLayerNames()\n ln_out = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n # perform object detection\n layerOutputs = net.forward(ln_out)\n\n\n # Get the result from outputs, and filter them by confidence\n boxes = []\n scores = []\n classes = []\n for output in layerOutputs: # There are three output layers in YOLO v3\n # Filter outputs by confidence\n (xywh_filterd, score_filtered, class_filtered) = filter_outputs(output, confidence)\n\n boxes.append(xywh_filterd)\n scores.append(score_filtered)\n classes.append(class_filtered)\n\n # Change shapes of arrays so that all boxes from any output layers are stored together\n boxes = np.vstack([r for r in boxes])\n scores = np.concatenate([r for r in scores], axis=None)\n classes = np.concatenate([r for r in classes], axis=None)\n\n # Apply Non-max supression\n boxes_coord = rescale_box_coord(boxes, W, H)\n nms_idx = yolo_non_max_supression(boxes_coord, scores, confidence, threshold)\n \n # filter the good ones\n return image, [{'box':boxes[_], 'score':scores[_], 'class':classes[_]} for _ in nms_idx]", "def demo(net, image_name, conf_thres, nms_thres, resDir, bShow=False):\n global CLASSES\n\n # Load the demo image\n im_file = image_name\n im = cv2.imread(im_file)\n fname = os.path.basename(image_name)\n\n # Detect all object classes and regress object bounds\n # timers\n _t = {'im_detect' : Timer(), 'misc' : Timer(), 'save' : Timer()}\n\n _t['im_detect'].tic()\n scores, boxes = im_detect(net, im)\n _t['im_detect'].toc()\n \n \n _t['misc'].tic() \n results = np.zeros((0, 6), dtype=np.float32)\n # Visualize detections for each class\n # for cls_ind, cls in enumerate(CLASSES[1:5]):\n for cls_ind, cls in enumerate(CLASSES[1:]): \n cls_ind += 1 # because we skipped background\n cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n \n # CPU NMS is much faster than GPU NMS when the number of boxes\n # is relative small (e.g., < 10k)\n # TODO(rbg): autotune NMS dispatch\n keep = nms(dets, nms_thres, force_cpu=True)\n dets = dets[keep, :]\n results = np.vstack( (results, np.insert(dets, 0, cls_ind, axis=1)) ) \n _t['misc'].toc() \n \n if bShow:\n plt.figure(1, figsize=(15,10))\n plt.clf()\n axe = plt.gca()\n axe.imshow(im[:,:,(2,1,0)])\n\n clrs = sns.color_palette(\"Set2\", len(CLASSES))\n for det in results:\n cls_ind, box, score = det[0], det[1:5], det[5]\n if score < 0.8: continue\n clr = clrs[int(cls_ind)]\n 
rect = plt.Rectangle( (box[0], box[1]), box[2]-box[0], box[3]-box[1], fill=False, edgecolor=clr, linewidth=2.5)\n axe.add_patch(rect)\n axe.text(box[0], box[1]-2, '{:.3f}'.format(score), bbox=dict(facecolor=clr, alpha=0.5), fontsize=14, color='white')\n\n axe.axis('off')\n save_name = os.path.basename(im_file)\n plt.savefig('[DEMO]'+save_name, dpi=200) \n\n else:\n _t['save'].tic()\n with open( os.path.join(resDir, fname.split('.')[0] + '.txt'), 'w') as fp: \n for det in results:\n if len(det) == 0: continue \n if det[5] < 0.01: continue\n\n resStr = '{:s} -1 -1 -10 '.format(CLASSES[int(det[0])]) \n resStr += '{:.2f} {:.2f} {:.2f} {:.2f} '.format(det[1],det[2],det[3],det[4]) # x1 y1 x2 y2\n resStr += '-1 -1 -1 -1000 -1000 -1000 -10 {:.4f}\\n'.format(det[5])\n fp.write( resStr ) \n _t['save'].toc()\n\n return _t", "def get_features(img1,mask1, depth1):\n colors = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)\n img3 = img1.copy()\n img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\n img1 = clahe.apply(img1) # Applying Clahe\n kp, des = orb.detectAndCompute(img1, mask=mask1) # Computing ORB features\n kp_pts = np.float32([ kp[m].pt for m in range(len(kp))]).reshape(-1,2)\n # Getting Colors\n col = []\n for i in range(len(kp)):\n col.append(colors[kp_pts[i,1].astype(int), kp_pts[i,0].astype(int)])\n col = np.array(col)\n # Getting 2D points\n kp_2d = []\n for m in range(len(kp)):\n kp_2d.append([int(kp[m].pt[0]), int(kp[m].pt[1])])\n kp_2d = np.array(kp_2d).reshape(-1,2)\n \n # Getting the 3D points\n kp_3d, _, _ = convert_3d(kp_2d, depth1, img3)\n \n # Removing points with Zero depth\n my_ind = np.where(kp_3d[:,2]!=0)[0]\n new_kp_3d = kp_3d[my_ind,:]\n new_kp_2d = kp_2d[my_ind,:]\n new_des = des[my_ind,:]\n new_col = col[my_ind,:]\n \n # Removing the duplicates\n uni_3d = np.unique(new_kp_3d, return_index= True, axis=0)[1]\n new_kp_3d1 = new_kp_3d[uni_3d,:]\n new_kp_2d1 = new_kp_2d[uni_3d,:]\n new_des1 = new_des[uni_3d,:]\n new_col1 = new_col[uni_3d,:]\n return kp_3d, kp_2d, des, col", "def predict(model, image, score_thresh, screen_mode, fill):\n\n global COLOR_DICT, prev_bboxes, prev_classes\n\n # Run the prediction\n scores, boxes, classes = model.predict(image)\n \n # Prepare the images for augmentation\n if screen_mode:\n new_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n else:\n new_image = np.zeros((image.shape[0], image.shape[1], 3), dtype=np.uint8)\n cv2.rectangle(new_image, (0, 0), (image.shape[1], image.shape[0]), (255, 0, 0), 5)\n\n # Go through each bounding box and only draw and save the ones above the score threshold\n detected = []\n for i in range(len(scores)):\n if scores[i] > score_thresh:\n detected.append([i, classes[i] + 1])\n detected = bbox_sort(detected) \n \n text_list = [] \n bboxes = []\n classes = []\n for i in range(len(detected)):\n box = boxes[detected[i][0]] * np.array([image.shape[0], image.shape[1], image.shape[0], image.shape[1]])\n bboxes.append(box)\n classes.append(detected[i][0])\n \n matched_indices = matchBBoxes(bboxes, prev_bboxes, 100)\n \n for i in range(len(detected)):\n color = COLOR_DICT[detected[i][1]]\n \n x0 = bboxes[i][1] - 20\n y0 = bboxes[i][0] - (1080 - bboxes[i][0]) * 50 / 1080\n x1 = bboxes[i][3] + 20\n y1 = bboxes[i][2]\n \n num_pairs = 0\n \n for index_pair in matched_indices:\n if index_pair[0] == i and detected[i][0] == prev_classes[index_pair[1]]:\n num_pairs += 1\n x0 = ((x0 * num_pairs) + prev_bboxes[index_pair[1]][1] - 20) / (num_pairs + 1.0)\n y0 = ((y0 * num_pairs) + prev_bboxes[index_pair[1]][0] - (1080 - prev_bboxes[index_pair[1]][1]) 
* 50 / 1080) / (num_pairs + 1.0)\n x1 = ((x1 * num_pairs) + prev_bboxes[index_pair[1]][3] + 20) / (num_pairs + 1.0)\n y1 = ((y1 * num_pairs) + prev_bboxes[index_pair[1]][2]) / (num_pairs + 1.0)\n \n line_type = 3\n if fill and not screen_mode:\n line_type = cv2.FILLED\n \n cv2.rectangle(new_image, (int(x0), int(y0)), (int(x1), int(y1)), color, line_type)\n\n name = CLASS_DICT[detected[i][1]]\n \n prev_bboxes = bboxes\n prev_classes = classes\n dy = 50 # Change in y position for each item\n for text in text_list:\n color = COLOR_DICT[text[2]]\n cv2.putText(new_image, str(text[1]) + \"x \" + text[0], (1500, y), cv2.FONT_HERSHEY_DUPLEX, 0.5, color, lineType=cv2.LINE_AA)\n y += dy\n\n return new_image", "def predict(X_img, knn_clf=None, model_path=None, distance_threshold=0.6):\n # if not os.path.isfile(X_img_path) or os.path.splitext(X_img_path)[1][1:] not in ALLOWED_EXTENSIONS:\n # raise Exception(\"Invalid image path: {}\".format(X_img_path))\n #\n # if knn_clf is None and model_path is None:\n # raise Exception(\"Must supply knn classifier either thourgh knn_clf or model_path\")\n #\n # # Load a trained KNN model (if one was passed in)\n print(model_path)\n if knn_clf is None:\n with open(model_path, 'rb') as f:\n print(f)\n print(\"before open\")\n knn_clf = pickle.load(f)\n print(\"is_open?\")\n\n # Grab a single frame of video\n # ret, frame = X_img_path.read()\n\n # Load image file and find face locations\n # X_img = frame[:, :, ::-1] #np.array(frame)\n print(\"X_img why not working\")\n # print(X_img)\n startTime = time.time()\n\n X_img = face_recognition.load_image_file('find.jpg')\n print(\"face_recognition : load img\")\n print(time.time() - startTime)\n\n startTime = time.time()\n\n X_face_locations = face_recognition.face_locations(X_img)\n print(X_face_locations)\n print(time.time() - startTime)\n startTime = time.time()\n #print(type((X_face_locations[0])[2]))\n #X_face_locations = fd.get_face()\n #X_face_locations = [(int(X_face_locations[0]), int(X_face_locations[3]), int(X_face_locations[2]), int(X_face_locations[1]))]\n print(X_face_locations)\n # face_bounding_boxes1.append(X_face_locations[0])\n # face_bounding_boxes1.append(X_face_locations[1])\n # face_bounding_boxes1.append(X_face_locations[2])\n # face_bounding_boxes1.append(X_face_locations[3])\n print(\"face location\")\n print(X_face_locations)\n print(time.time() - startTime)\n print(len(X_face_locations))\n\n # cv2.imshow(\"asdf\", X_face_locations)\n # If no faces are found in the image, return an empty result.\n if len(X_face_locations) == 0:\n return []\n\n # Find encodings for faces in the test iamge\n # print(rgb_small_frame)\n print(\"X_face_locations\")\n print(X_face_locations)\n\n # cap = cv2.VideoCapture(0)\n # ret1, frame1 = cap.read()\n\n # while True:\n #\n # if ret:\n # cv2.imshow(\"video\", X_img)\n #\n # if cv2.waitKey(1) & 0xFF == ord('q'):\n # break;\n # else:\n # break;\n #print(X_face_locations)\n startTime = time.time()\n faces_encodings = face_recognition.face_encodings(X_img, known_face_locations=X_face_locations)\n print(\"encoding\")\n print(time.time() - startTime)\n #print(faces_encodings)\n startTime = time.time()\n # Use the KNN model to find the best matches for the test face\n closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)\n print(\"kneighbors\")\n print(time.time() - startTime)\n # closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)\n are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]\n\n 
# Predict classes and remove classifications that aren't within the threshold\n return [(pred, loc) if rec else (\"unknown\", loc) for pred, loc, rec in\n zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]", "def stabilization(videopath,\n smoothing_radius=50,\n fixed_area=[0, -1, 0, -1],\n stab_points=200):\n\n # Read original video\n # Extract directory and videoname for saving purposes\n capture = cv2.VideoCapture(videopath)\n directory, videoname = os.path.split(videopath)\n videoname = os.path.splitext(videoname)[0]\n\n # Get number of frames\n # Get width and height of video stream\n # Get frames per second (fps)\n n_frames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n w = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = capture.get(cv2.CAP_PROP_FPS)\n\n # Set up output video\n out = cv2.VideoWriter(os.path.join(directory, str(videoname)+'_stabilized.mp4'),\n -1,\n fps,\n (w, h))\n\n # Read first frame\n # Convert frame to grayscale and extract part of image\n _, prev = capture.read()\n xmin, xmax, ymin, ymax = fixed_area\n prev_gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)[ymin:ymax, xmin:xmax]\n\n # Pre-define transformation-store array\n transforms = np.zeros((n_frames-1, 3), np.float32)\n\n # Filling in transformation array per frameset\n with tqdm(total=2*(n_frames-2), ncols=50) as pbar:\n for i in range(n_frames-2):\n # Detect feature points in previous frame\n prev_pts = cv2.goodFeaturesToTrack(prev_gray,\n maxCorners=stab_points,\n qualityLevel=0.1,\n minDistance=100,\n blockSize=10)\n # Read next frame\n # If not success: break loop\n success, curr = capture.read()\n if not success:\n break\n\n # Convert to grayscale and extract part of image\n # Calculate optical flow (i.e. 
track feature points)\n # Sanity check\n curr_gray = cv2.cvtColor(curr, cv2.COLOR_BGR2GRAY)[ymin:ymax,\n xmin:xmax]\n curr_pts, status, err = cv2.calcOpticalFlowPyrLK(prev_gray,\n curr_gray,\n prev_pts,\n None)\n assert prev_pts.shape == curr_pts.shape\n\n # Filter only valid points\n idx = np.where(status == 1)[0]\n prev_pts = prev_pts[idx]\n curr_pts = curr_pts[idx]\n\n # Find transformation matrix\n m = cv2.estimateAffinePartial2D(prev_pts, curr_pts)[0]\n\n # Extract translation\n # Extract rotation angle\n dx = m[0, 2]\n dy = m[1, 2]\n da = np.arctan2(m[1, 0], m[0, 0])\n\n # Store transformation\n transforms[i] = [dx, dy, da]\n\n # Move to next frame\n prev_gray = curr_gray\n\n pbar.update(1)\n\n # Compute trajectory using cumulative sum of transformations\n # Create variable to store smoothed trajectory\n # Calculate diference in smoothed_trajectory and trajectory\n trajectory = np.cumsum(transforms, axis=0)\n smoothed_trajectory = smooth(trajectory, smoothing_radius)\n difference = smoothed_trajectory - trajectory\n\n # Calculate newer transformation array\n transform_smooth = transforms + difference\n\n # Reset stream to first frame\n capture.set(cv2.CAP_PROP_POS_FRAMES, 0)\n\n # Apply transformations to video\n for i in range(n_frames-2):\n # Read next frame\n success, frame = capture.read()\n if not success:\n break\n\n # Extract transformations from the new transformation array\n dx = transform_smooth[i, 0]\n dy = transform_smooth[i, 1]\n da = transform_smooth[i, 2]\n\n # Reconstruct transformation matrix accordingly to new values\n m = np.zeros((2, 3), np.float32)\n m[0, 0] = np.cos(da)\n m[0, 1] = -np.sin(da)\n m[1, 0] = np.sin(da)\n m[1, 1] = np.cos(da)\n m[0, 2] = dx\n m[1, 2] = dy\n\n # Apply affine wrapping to the given frame\n # Fix border artifacts\n frame_stabilized = cv2.warpAffine(frame, m, (w, h))\n frame_stabilized = fixBorder(frame_stabilized)\n\n # Save new frame\n out.write(frame_stabilized)\n\n pbar.update(1)\n\n # Release original and stabilized video\n capture.release()\n out.release()", "def get_classification_simulator(self, image):\n\n r_channel = image[:,:,2]\n g_channel = image[:,:,1]\n\n\n\n # Threshold color channel\n s_rgy_min = 50\n s_thresh_min = 245\n s_thresh_max = 255\n \n #s_binary = np.zeros_like(r_channel)\n r_binary = np.zeros_like(r_channel)\n g_binary = np.zeros_like(r_channel)\n y_binary = np.zeros_like(r_channel)\n \n #s_binary[((r_channel >= s_thresh_min) & (r_channel <= s_thresh_max)) | ((g_channel >= s_thresh_min) & (g_channel <= s_thresh_max))] = 1\n \n \n r_binary[((r_channel >= s_thresh_min) & (r_channel <= s_thresh_max)) & (g_channel <= s_rgy_min)] = 1\n g_binary[((g_channel >= s_thresh_min) & (g_channel <= s_thresh_max)) & (r_channel <= s_rgy_min)] = 1\n y_binary[((r_channel >= s_thresh_min) & (r_channel <= s_thresh_max)) & ((g_channel >= s_thresh_min) & (g_channel <= s_thresh_max))] = 1\n \n\n #res = cv2.bitwise_and(img,img,mask = s_binary)\n \n #maxx=image.shape[1]\n maxy=image.shape[0]\n \n y_top=0\n window_size_y=50\n y_bottom=y_top+window_size_y\n \n max_color=0\n tf_color=TrafficLight.UNKNOWN\n \n while (y_bottom< maxy):\n #print(img[y_top:y_bottom,:,:])\n rs= r_binary[y_top:y_bottom,:].sum()\n gs= g_binary[y_top:y_bottom,:].sum()\n ys= y_binary[y_top:y_bottom,:].sum()\n if (rs>max_color):\n max_color=rs\n tf_color=TrafficLight.RED\n if (gs>max_color):\n max_color=gs\n tf_color=TrafficLight.GREEN\n if (ys>max_color):\n max_color=ys\n tf_color=TrafficLight.YELLOW\n y_top+=window_size_y\n y_bottom+=window_size_y\n \n if 
(max_color<100):\n tf_color=TrafficLight.UNKNOWN\n \n\n\n return tf_color", "def __init__(self, bbox, frame, min_hits, max_age):\n print(\"DETECTION\")\n # bounding box\n self.csrt = cv2.TrackerCSRT_create()\n # convert the bbox to csrt bbox format\n # print(bbox[:4])\n self.csrt.init(frame, convert_bbox_to_wh(bbox[:4]))\n self.bbox = bbox\n self.time_since_update = 0\n self.id = CSRTTracker.count\n CSRTTracker.count += 1\n self.history = []\n self.min_hits = min_hits\n self.max_age = max_age\n self.hits = 0\n self.hit_streak = 1\n self.age = 0\n\n self.detclass = bbox[5]\n self.confidence = bbox[4]", "def motionDeflicker(imgs):\n b = [x[:,:,0] for x in imgs] \n g = [x[:,:,1] for x in imgs] \n r = [x[:,:,2] for x in imgs] \n b_corrected = single_deflicker(b)\n g_corrected = single_deflicker(g)\n r_corrected = single_deflicker(r)\n return cv2.merge((np.uint8(b_corrected),np.uint8(g_corrected),np.uint8(r_corrected)))", "def extractFeatures(bwimage):\n \n \n # circularity\n img = bwimage.copy()\n img1, contours, hierarchy = cv2.findContours(img, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n \n if len(contours)==0:\n return []\n B = contours[0]\n C = B[:,0,0]\n l = C.size\n \n \n if abs(B[0,0,0] - B[l-1,0,0]) + abs(B[0,0,1] - B[l-1,0,1]) == 2:\n P8 = math.sqrt(2)\n else:\n P8 = 1 \n for j in range(0,l-1): \n if abs((B[j+1,0,0] - B[j,0,0])) + abs(B[j+1,0,1] - B[j,0,1]) == 2:\n P8 = P8 + math.sqrt(2)\n else:\n P8 = P8 + 1\n \n n = np.count_nonzero(bwimage)\n \n circularity = P8*P8/n\n \n \n # elongation\n idx = np.nonzero(bwimage);\n c = idx[1]\n r = idx[0]\n meanx = np.mean(c)\n meany = np.mean(r)\n \n \n pows = 2*np.ones(n)\n \n sigxx = np.sum(np.power((c-meanx),pows))/n\n sigyy = np.sum(np.power((r-meany),pows))/n\n sigxy = np.sum(np.multiply((r-meany),(c-meanx)))/n\n \n covMat = np.array([[sigxx, sigxy], [sigxy, sigyy]])\n val, vects = np.linalg.eig(covMat);\n \n maxEigenValue = np.amax(val) \n minEigenValue = np.amin(val.ravel()[np.flatnonzero(val)])\n \n \n elongation = math.sqrt(maxEigenValue/minEigenValue);\n \n \n # principal axis\n maxidx = np.argmax(val)\n principalAxisVector = vects[maxidx]\n \n \n return [circularity, elongation, principalAxisVector]", "def my_lane_detection_pipeline(image, debug_images=False):\n\n # Step 1 - Filter and enhance image by lane color\n image_s1 = filter_lane_color(image)\n \n # Step 2 - Canny edge detection with Gaussian blur and region mask\n image_s2 = detect_lane_edges(image_s1)\n \n # Step 3 - Raw line detection by Hough transform and classify left/right by angle\n (image_s3, left_lines, right_lines) = detect_lane_lines(image_s2, image)\n \n # Step 4 - Set left/right lanes by weighted linear polyfit of raw lines\n image_s4 = set_lanes(left_lines, right_lines, image_s3)\n \n # Save images of each step for debugging and documentation\n if debug_images:\n mpimg.imsave('test_images_output/'+image_name.replace('.jpg','_s0.jpg'), image)\n mpimg.imsave('test_images_output/'+image_name.replace('.jpg','_s1.jpg'), image_s1, cmap = 'gray')\n mpimg.imsave('test_images_output/'+image_name.replace('.jpg','_s2.jpg'), image_s2)\n mpimg.imsave('test_images_output/'+image_name.replace('.jpg','_s3.jpg'), image_s3)\n mpimg.imsave('test_images_output/'+image_name.replace('.jpg','_s4.jpg'), image_s4)\n \n # Output image with overlaid raw lane lines and detected left/right lanes\n return image_s4", "def create_binary_image(img, s_thresh=(100, 255), sx_thresh=(10, 200), dir_thresh=(np.pi/6, np.pi/2), c_thresh=50):\n # We use a combination of gradient and direction threshold\n 
# convert to gray scale\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Compute the combined threshold\n sobel_x = sobel_mask(gray, sx_thresh)\n dir_gradient = dir_mask(gray, dir_thresh)\n combined = ((sobel_x == 1) & (dir_gradient == 1))\n\n # Color threshold in RGB color space\n # This helps to detect yellow lanes better, which is a significant issue in the video \n G = img[:,:,1]\n R = img[:,:,2]\n r_g = (R > c_thresh) & (G > c_thresh)\n \n # color channel thresholds\n hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)\n S = hls[:,:,2]\n L = hls[:,:,1]\n \n # S channel performs well for detecting bright yellow and white lanes\n s = (S > s_thresh[0]) & (S <= s_thresh[1])\n l = (L > s_thresh[0]) & (L <= s_thresh[1])\n\n # combine all the thresholds\n # The pixel we want is either white or yellow\n color_combined = np.zeros_like(R)\n color_combined[(r_g & l) & (s | combined)] = 1\n \n # apply the region of interest mask\n # This helps to remove the shadow outside the lane\n mask = np.zeros_like(color_combined)\n h, w = img.shape[0], img.shape[1]\n polygon_vertice = np.array([[0,h-1], [w//2, h//2], [w-1, h-1]], dtype=np.int32)\n cv2.fillPoly(mask, [polygon_vertice], 1)\n binary = cv2.bitwise_and(color_combined, mask)\n \n return binary", "def vis_detections(lossfunc, image_name, class_name, dets, thresh=0.5):\n if lossfunc == 'vanilla':\n label = 'Smooth L1 Loss'\n if lossfunc == 'robust':\n label = 'Robust L1 Loss (10%)'\n\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return\n\n # Load the demo image\n im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)\n im = cv2.imread(im_file)\n\n im = im[:, :, (2, 1, 0)]\n fig, ax = plt.subplots()\n ax.imshow(im, aspect='equal')\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n\n ax.add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0],\n bbox[3] - bbox[1], fill=False,\n edgecolor='red', linewidth=3.5)\n )\n ax.text(bbox[0], bbox[1] - 2,\n '{:s} {:.3f}'.format(class_name, score),\n bbox=dict(facecolor='blue', alpha=0.5),\n color='white')\n\n ax.set_title('{}, {} Class'.format(label, class_name.capitalize()))\n plt.axis('off')\n plt.tight_layout()\n fig.savefig(os.path.join('cs231n', 'viz', lossfunc, '{}_{}.png'.format(\n image_name.split('.')[0], class_name)))", "def process(img):\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) \n x_t = cv2.resize(img, (112, 160), interpolation=cv2.INTER_AREA)\n x_t = np.nan_to_num(x_t)\n x_t = cv2.Laplacian(x_t,cv2.CV_8U)\n\n return x_t.astype(np.uint8)", "def x_means(image_path):\n\n # List where all the descriptors are stored\n des_list = []\n\n im = cv2.imread(image_path)\n kpts = fea_det.detect(im)\n kpts, des = des_ext.compute(im, kpts)\n\n # rootsift - not boosting performance\n # rs = RootSIFT()\n # des = rs.compute(kpts, des)\n\n des_list.append((image_path, des))\n\n # Stack all the descriptors vertically in a numpy array\n descriptors = des_list[0][1]\n\n # gather features\n test_features = np.zeros((1, numWords), \"float32\")\n words, _ = vq(descriptors, voc)\n for w in words:\n test_features[0][w] += 1\n\n # Perform Tf-Idf vectorization and L2 normalization\n test_features = test_features * idf\n test_features = preprocessing.normalize(test_features, norm='l2')\n\n score = np.dot(test_features, im_features.T)\n # print \"score: \", score\n rank_ID = np.argsort(-score)\n # print \"rank matrix: \", rank_ID[0]\n\n for ID in rank_ID[0]:\n if score[0][ID] <= float(lowcut):\n if ID not in low_union:\n low_union.append(ID)\n\n x_list = []\n for x in score[0]:\n 
x_list.append([x])\n X = np.array(x_list)\n for n in X:\n n[0] *= 1000000 # increase difference\n # print \"Scores for X-Means: \", X\n\n # compute k range\n # ks = range(1, 21)\n bot_k = 30 * len(image_paths) / (200 + len(image_paths)) - 5\n ks = range(bot_k, bot_k + 10)\n\n # run ks times kmeans and save each result in the KMeans object\n KMeans = [cluster.KMeans(n_clusters=i, init=\"k-means++\").fit(X) for i in ks]\n\n # now run for each cluster the BIC computation\n bic_max = -sys.maxint - 1\n max_idx = 0\n # BIC = [compute_bic(kmeansi,X) for kmeansi in KMeans]\n for i in range(len(KMeans)):\n curr = compute_bic(KMeans[i], X)\n # print \"BIC = \", curr, \" when using %d clusters\\r\" % (i + bot_k)\n if curr > bic_max:\n bic_max = curr\n max_idx = i\n\n best_k = max_idx + bot_k\n # print \"Best K = \", best_k, \"with BIC = %d\\r\" % bic_max\n best_k_labels = KMeans[max_idx].labels_\n # print \"Best K labels\", KMeans[max_idx].labels_\n\n freq = {}\n for cluster_no in best_k_labels:\n if cluster_no in freq:\n freq[cluster_no] = freq[cluster_no] + 1\n else:\n freq[cluster_no] = 1\n max_cluster = 0\n max_size = -1\n for k, v in freq.items():\n if v > max_size:\n max_cluster = k\n max_size = v\n res = []\n for idx in range(len(best_k_labels)):\n if best_k_labels[idx] == max_cluster:\n res.append(idx)\n assert max_size == len(res)\n print \"\\nSize of the largest cluster = \", max_size\n\n # # Archive big clusters for inspection\n # path = os.path.split(pool_path)[0] + \"\\\\\" + \"cluster_\" + os.path.split(image_path)[1]\n # if os.path.exists(path):\n # shutil.rmtree(path)\n # os.mkdir(path)\n # print \"Adding cluster images for pool image \", image_path, \" to path \", path\n # for ID in res:\n # # print \"Adding \", image_paths[ID]\n # shutil.copy(image_paths[ID], path)\n\n return res # biggest_cluster list", "def get_affine_orig():\n root_dir = \"/home/sdb/wangshentao/myspace/thesis/data/VisDrone2019-MOT-test-dev/\"\n seq_dir = root_dir + \"sequences/\"\n affine_dir = root_dir + \"affine_orig/\"\n if not os.path.exists(affine_dir):\n os.makedirs(affine_dir)\n MIN_MATCH_COUNT = 10\n # 1088 is more accurate\n for seq in os.listdir(seq_dir):\n print(seq)\n seq_files = os.listdir(os.path.join(seq_dir, seq))\n seq_files = sorted(seq_files, key=lambda x: int(x[:-4]))\n affine_dict = {}\n for i in range(len(seq_files)-1):\n print(i)\n image0 = cv2.imread(os.path.join(seq_dir, seq, seq_files[i]))\n image1 = cv2.imread(os.path.join(seq_dir, seq, seq_files[i+1]))\n image0 = cv2.cvtColor(image0, cv2.COLOR_BGR2GRAY)\n image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)\n surf = cv2.xfeatures2d.SURF_create()\n kp0, des0 = surf.detectAndCompute(image0, None)\n kp1, des1 = surf.detectAndCompute(image1, None)\n FLANN_INDEX_KDTREE = 0\n index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)\n search_params = dict(checks=10)\n\n flann = cv2.FlannBasedMatcher(index_params, search_params)\n matchs = flann.knnMatch(des0, des1, k=2)\n\n # store all the good matchs as per Lowe's ratio test\n good = []\n for m, n in matchs:\n if m.distance < 0.7 * n.distance:\n good.append(m)\n if len(good) > MIN_MATCH_COUNT:\n src_pts = np.float32([kp0[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)\n dst_pts = np.float32([kp1[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)\n M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)\n else:\n M = np.eye(3, 3)\n affine_dict[seq_files[i]] = M\n with open(os.path.join(seq_dir, affine_dir, seq+'.pickle'), 'wb') as fout:\n pickle.dump(affine_dict, fout)", "def 
process_camera():\n\n pic_array = take_picture()\n detections, shapes, descriptors = detect_faces(person_database,pic_array)\n\n names = []\n\n for desc in descriptors:\n name = find_match(person_database, desc)\n names.append(name)\n\n return pic_array, names, detections, shapes, descriptors", "def getCV(file1, type, imgOri,iteration):\n del s_arrays[:]\n del shapesContours[:]\n\n if type == 'shapes':\n spContours = getContours(imgOri,iteration)\n file1.write(str(len(spContours)) + '\\n')\n print(len(spContours))\n spContours = clean_Con(spContours)\n file1.write(str(len(spContours)) + '\\n')\n print(len(spContours))\n cv2.drawContours(imgOri, spContours, -1, (0, 255, 128), 5)\n # del s_arrays[:]\n # for each shape\n for cons in spContours:\n sampleComVector = []\n x, y, w, h = cv2.boundingRect(cons)\n cv2.rectangle(imgOri, (x, y), (x + w, y + h), (100, 100, 100), 1)\n\n # move the points to center\n for point in cons:\n sampleComVector.append(complex(point[0][0] - x, (point[0][1] - y)))\n # sampleComVectors store CV of all testees contours\n s_arrays.append(sampleComVector)\n # sampleContours store all testees contours, same order with sampleComVectors\n shapesContours.append(cons)\n\n elif type == 'temp':\n # Automatically find templete contour\n templetTrue = imgOri\n tpContour = getContours(templetTrue,iteration)\n for contour in tpContour:\n x, y, w, h = cv2.boundingRect(contour)\n #\n for point in contour:\n # -x and -y are to make left and upper boundry start from 0\n t_array.append(complex(point[0][0] - x, (point[0][1] - y)))", "def get_classification(self, image):\n\n # Convert image to PIL RGB image\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n # add a fourth batch dimension to array\n image = np.expand_dims(image, axis=0)\n\n ## Predict images class\n if image.shape==(1, self.img_height, self.img_width, self.img_channels):\n y_pred = self.model.predict(image)\n else:\n rospy.logwarn(\"tl_classifier: Wrong image shape: {},{},{},{}\".format(image.shape[0],image.shape[1],image.shape[2],image.shape[3]))\n return TrafficLight.UNKNOWN\n\n # Filter predictions\n confidence_threshold = 0.7\n y_pred_thresh = [y_pred[k][y_pred[k,:,1] > confidence_threshold] for k in range(y_pred.shape[0])]\n\n # Output predicted classes and scores\n #rospy.loginfo(\"tl_classifier: class conf xmin ymin xmax ymax\")\n \n # Filter classes prediction\n tl_pred_classes = y_pred_thresh[0][:,0]\n tl_pred_scores = y_pred_thresh[0][:,1]\n # Find classes that contains tl's\n tl_pred_classes = [cl for cl in tl_pred_classes if 1<=cl<=3]\n\n\n # Test light state (if prediction is not empty)\n if len(tl_pred_classes) > 0:\n if (tl_pred_classes[0]==1):\n tl_return = TrafficLight.GREEN\n rospy.loginfo(\"tl_classifier: Green detected, score {:.2f}\".format(tl_pred_scores[0]))\n elif (tl_pred_classes[0]==2):\n tl_return = TrafficLight.YELLOW\n rospy.loginfo(\"tl_classifier: Yellow detected, score {:.2f}\".format(tl_pred_scores[0]))\n elif (tl_pred_classes[0]==3):\n tl_return = TrafficLight.RED\n rospy.loginfo(\"tl_classifier: Red detected, score {:.2f}\".format(tl_pred_scores[0]))\n else:\n tl_return = TrafficLight.UNKNOWN\n rospy.loginfo(\"tl_classifier: Other class detected!\")\n else:\n tl_return = TrafficLight.UNKNOWN\n rospy.loginfo(\"tl_classifier: Unknown detected!\")\n\n\n return tl_return", "def process_frame(self, \n img: np.ndarray) -> Tuple[List[List[int]],\n List[float],\n np.ndarray]:\n bbox_filtered, scores_filtered = [], []\n img_dif = None\n \n features = img.astype(np.float32)\n \n if 
self.story_features is not None:\n img_dif = AmphibianDetectorSSDBaseline._mse_distance(features, self.story_features)\n if img_dif.max() < self.motion_threshold:\n self.story_features = self.story_features * self.alpha + features * (1 - self.alpha)\n else:\n self.story_features = features\n \n if img_dif is not None and img_dif.max() >= self.motion_threshold:\n detection_boxes, detection_scores = self._detect_objects(features)\n\n for bbox, score in zip(detection_boxes, detection_scores):\n if score < self.detection_threshold:\n continue\n inner_dif = img_dif[int(bbox[0]*img_dif.shape[0]): int(bbox[2]*img_dif.shape[0]),\n int(bbox[1]*img_dif.shape[1]): int(bbox[3]*img_dif.shape[1])]\n\n if inner_dif.mean() >= self.motion_threshold:\n postproc_bbox = [int(bbox[0]*img.shape[0]), int(bbox[1]*img.shape[1]),\n int(bbox[2]*img.shape[0]), int(bbox[3]*img.shape[1])]\n bbox_filtered.append(postproc_bbox)\n scores_filtered.append(score)\n\n return bbox_filtered, scores_filtered, img_dif", "def main():\r\n args = Parameters().parse()\r\n # #\r\n # args.method = 'student_res18_pre'\r\n args.method = 'student_esp_d'\r\n args.dataset = 'camvid_light'\r\n args.data_list = \"/ssd/yifan/SegNet/CamVid/test.txt\"\r\n args.data_dir = \"/ssd/yifan/\"\r\n args.num_classes = 11\r\n # args.method='psp_dsn_floor'\r\n args.restore_from = \"./checkpoint/Camvid/ESP/base_57.8.pth\"\r\n # args.restore_from=\"/teamscratch/msravcshare/v-yifan/ESPNet/train/0.4results_enc_01_enc_2_8/model_298.pth\"\r\n # args.restore_from = \"/teamscratch/msravcshare/v-yifacd n/sd_pytorch0.5/checkpoint/snapshots_psp_dsn_floor_1e-2_40000_TEACHER864/CS_scenes_40000.pth\"\r\n # args.restore_from = \"/teamscratch/msravcshare/v-yifan/sd_pytorch0.5/checkpoint/snapshots_psp_dsn_floor_1e-2_40000_TEACHER5121024_esp/CS_scenes_40000.pth\"\r\n # args.data_list = '/teamscratch/msravcshare/v-yifan/deeplab_v3/dataset/list/cityscapes/train.lst'\r\n args.batch_size = 1\r\n print(\"Input arguments:\")\r\n for key, val in vars(args).items():\r\n print(\"{:16} {}\".format(key, val))\r\n\r\n h, w = map(int, args.input_size.split(','))\r\n input_size = (h, w)\r\n\r\n print(args)\r\n output_path = args.output_path\r\n if not os.path.exists(output_path):\r\n os.makedirs(output_path)\r\n # args.method='psp_dsn'\r\n deeplab = get_segmentation_model(args.method, num_classes=args.num_classes)\r\n\r\n ignore_label = 255\r\n id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,\r\n 3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,\r\n 7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,\r\n 14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,\r\n 18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,\r\n 28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}\r\n\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\r\n # args.restore_from=\"/teamscratch/msravcshare/v-yifan/sd_pytorch0.3/checkpoint/snapshots_resnet_psp_dsn_1e-4_5e-4_8_20000_DSN_0.4_769light/CS_scenes_20000.pth\"\r\n # if 'dense' in args.method:\r\n #\r\n if args.restore_from is not None:\r\n saved_state_dict = torch.load(args.restore_from)\r\n c_keys = saved_state_dict.keys()\r\n for i in c_keys:\r\n flag = i.split('.')[0]\r\n if 'module' in flag:\r\n deeplab = nn.DataParallel(deeplab)\r\n deeplab.load_state_dict(saved_state_dict)\r\n if 'module' not in flag:\r\n deeplab = nn.DataParallel(deeplab)\r\n # if 'dense' not in args.method:\r\n # deeplab = nn.DataParallel(deeplab)\r\n model = 
deeplab\r\n model.eval()\r\n model.cuda()\r\n # args.dataset='cityscapes_light'\r\n testloader = data.DataLoader(get_segmentation_dataset(args.dataset, root=args.data_dir, list_path=args.data_list,\r\n crop_size=(360, 480), mean=IMG_MEAN, scale=False,\r\n mirror=False),\r\n batch_size=args.batch_size, shuffle=False, pin_memory=True)\r\n\r\n data_list = []\r\n confusion_matrix = np.zeros((args.num_classes, args.num_classes))\r\n\r\n palette = get_palette(20)\r\n\r\n image_id = 0\r\n for index, batch in enumerate(testloader):\r\n if index % 100 == 0:\r\n print('%d processd' % (index))\r\n if args.side:\r\n image, label, _, size, name = batch\r\n elif 'sd' in args.dataset:\r\n _, image, label, size, name = batch\r\n else:\r\n image, label, size, name = batch\r\n # print('image name: {}'.format(name))\r\n size = size[0].numpy()\r\n output = predict_esp(model, image)\r\n # seg_pred = np.asarray(np.argmax(output, axis=3), dtype=np.uint8)\r\n result = np.asarray(np.argmax(output, axis=3), dtype=np.uint8)\r\n # result=cv2.resize(result, (1024, 1024), interpolation=cv2.INTER_NEAREST)\r\n m_seg_pred = ma.masked_array(result, mask=torch.eq(label, 255))\r\n ma.set_fill_value(m_seg_pred, 20)\r\n seg_pred = m_seg_pred\r\n\r\n for i in range(image.size(0)):\r\n image_id += 1\r\n print('%d th segmentation map generated ...' % (image_id))\r\n args.store_output = 'True'\r\n output_path = './esp_camvid_base/'\r\n if not os.path.exists(output_path):\r\n os.mkdir(output_path)\r\n if args.store_output == 'True':\r\n # print('a')\r\n output_im = PILImage.fromarray(seg_pred[i])\r\n output_im.putpalette(palette)\r\n output_im.save(output_path + '/' + name[i] + '.png')\r\n\r\n seg_gt = np.asarray(label.numpy()[:, :size[0], :size[1]], dtype=np.int)\r\n ignore_index = seg_gt != 255\r\n seg_gt = seg_gt[ignore_index]\r\n seg_pred = seg_pred[ignore_index]\r\n confusion_matrix += get_confusion_matrix(seg_gt, seg_pred, args.num_classes)\r\n\r\n pos = confusion_matrix.sum(1)\r\n res = confusion_matrix.sum(0)\r\n tp = np.diag(confusion_matrix)\r\n\r\n IU_array = (tp / np.maximum(1.0, pos + res - tp))\r\n mean_IU = IU_array.mean()\r\n\r\n print({'meanIU': mean_IU, 'IU_array': IU_array})\r\n\r\n print(\"confusion matrix\\n\")\r\n print(confusion_matrix)", "def get_motion(frame1k, frame2k, frame_count):\n frame1 = frame1k.copy()\n frame2 = frame2k.copy()\n\n global limb_coords, init_coords, num_blocks\n cv2.imwrite(\"thisImageAnalyse.png\", frame2)\n block_size = 3\n block_rad = int(block_size/2)\n\n def get_SSD():\n \"\"\" applies SSD formula to search area\n :return SSD value\"\"\"\n dist = 0\n # traversal of pixels in potential Bi+1 block\n # compare corresponding pixel positions with source block in f1 and neighbour block in f2\n y1 = center_y1 - block_rad # start pos.\n for y2 in range(center_y2 - block_rad, (center_y2 - block_rad + block_size)):\n x1 = center_x1 - block_rad # start pos\n for x2 in range(center_x2 - block_rad, (center_x2 - block_rad + block_size)):\n try:\n # displacement formula for RGB channels of each pixel in block\n dist = dist + (frame1[y1][x1][0] - frame2[y2][x2][0])**2 + (frame1[y1][x1][1] - frame2[y2][x2][1])**2 + (frame1[y1][x1][2] - frame2[y2][x2][2])**2\n except RuntimeWarning:\n pass\n x1 += 1\n y1 += 1\n return math.sqrt(dist)\n\n # for each body part\n b = 0\n while b < 5:\n avg_x = 0.0\n avg_y = 0.0\n new_x = 0.0\n new_y = 0.0\n a = 0\n # for each block on body part (9 total)\n while a < num_blocks:\n found = False\n search_rad = 5\n while found is False:\n center_y1 = 
int(init_coords[b][a][0])\n center_x1 = int(init_coords[b][a][1])\n min_SSD = 999999\n # for pythagoras to ensure closest block gets picked when equality occurs of SSD value\n min_d = 999999\n # this finds the center of the block to compare\n for factor_y in range(-search_rad, search_rad + 1):\n center_y2 = center_y1 + block_size*factor_y\n y_dist = center_y1 - abs(center_y2)\n for factor_x in range(-search_rad, search_rad + 1):\n center_x2 = center_x1 + block_size*factor_x\n x_dist = center_x1 - abs(center_x2)\n # pythagoras\n d = math.sqrt((y_dist**2 + x_dist**2))\n if d < min_d:\n min_d = d\n\n SSD = get_SSD()\n if frame2[center_y2][center_x2][1] != 0 and frame2[center_y2][center_x2][2] != 0:\n found = True\n if SSD < min_SSD:\n min_SSD = SSD\n new_y = center_y2\n new_x = center_x2\n elif SSD == min_SSD and d < min_d:\n new_y = center_y2\n new_x = center_x2\n if found is False:\n # if no block is found repeat the search, increasing the search size by 1\n search_rad += 1\n # draw extracted vectors\n cv2.arrowedLine(frame1k, (int(center_x1), int(center_y1)), (int(new_x), int(new_y)), (150, 200, 30), 1, 4, 0, 0.3)\n avg_x += new_x\n avg_y += new_y\n init_coords[b][a][0] = new_y\n init_coords[b][a][1] = new_x\n a += 1\n cv2.imwrite('monkeyFrames/contrast_enhanced%d.png' % frame_count, frame1k)\n limb_coords[b][frame_count][0] = int(avg_y/num_blocks)\n limb_coords[b][frame_count][1] = int(avg_x/num_blocks)\n b += 1" ]
[ "0.6522741", "0.6153111", "0.6147058", "0.61378473", "0.61305165", "0.6099224", "0.60517925", "0.60009754", "0.5988831", "0.5987549", "0.59458303", "0.5937482", "0.59371877", "0.59214646", "0.5915029", "0.5895097", "0.5894147", "0.5874924", "0.584025", "0.58343947", "0.5822106", "0.5819223", "0.5792875", "0.5765166", "0.5764505", "0.57557034", "0.5751203", "0.5750547", "0.5728994", "0.57205117", "0.5718157", "0.5711019", "0.5703289", "0.5694051", "0.5625159", "0.56109923", "0.5598866", "0.55978006", "0.5596666", "0.5586124", "0.5585477", "0.55800027", "0.5577952", "0.55776775", "0.55774206", "0.5566372", "0.55558866", "0.55526304", "0.55500513", "0.55322766", "0.55132884", "0.5507471", "0.5503717", "0.5497538", "0.5495907", "0.5488216", "0.5487389", "0.54624057", "0.54603183", "0.54576683", "0.5441677", "0.54286975", "0.542325", "0.5422594", "0.5419101", "0.54174066", "0.5416399", "0.5401943", "0.5400693", "0.5396277", "0.53837985", "0.5381615", "0.53779984", "0.53734404", "0.53685975", "0.5366005", "0.5362662", "0.53611726", "0.5360497", "0.5355414", "0.5348845", "0.5342491", "0.53401417", "0.5338615", "0.53362966", "0.5331245", "0.53263503", "0.53253376", "0.531714", "0.53112096", "0.5310362", "0.5307155", "0.53053117", "0.53008294", "0.53001326", "0.5299558", "0.52984995", "0.5295642", "0.5289416", "0.52890646", "0.5288061" ]
0.0
-1
The main run loop
def run(self): r = rospy.Rate(100) while not rospy.is_shutdown(): r.sleep()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loop(self):\n pass", "def _run(self):\n while(self._loop):\n pass", "async def _main(self):\n while True:\n time.sleep(1)", "def run(self):\n self.cmdloop()", "def main_loop(self):\n # main loop...don't ever exit\n while True:\n # collect data\n # get the time...the local clock is set with NTP regularly\n self._get_time()\n \n # get the latest metar data from the closest location\n self._get_metar()\n \n # get the latest fence station data\n self._get_fence_station()\n \n # get the lastest roof station data\n #METAR self._get_roof_station()\n \n # publish the data to our data file\n self.write_data_files()\n \n # show the user we are running\n print(\"{:s}\".format(datetime.datetime.now(pytz.UTC).strftime(\"%Y-%m-%d %H:%M:%S.%f\")), end=\"\\r\", flush=True)\n \n # wait a bit for the next loop\n time.sleep(3.0)\n \n return", "def loop(self):\n raise NotImplementedError()", "def _handle_loop(self):\n pass", "def run():\n main()", "def main_loop(self):\n dt = 0\n self.clock.tick(FPS)\n while not self.done:\n self.event_loop()\n self.update(dt)\n self.render()\n dt = self.clock.tick(FPS) / 1000.0", "def loop_run(self):\n super(EventLoop, self).loop_run()\n self.inq = self.cothread.EventQueue()", "def main_loop(self):\n dt = 0.3\n self.clock.tick(self.fps)\n while not self.done:\n self.event_loop()\n self.update(dt)\n self.render()\n dt = self.clock.tick(self.fps)/1000.0 # create delta time variable to multiply with movement and rotation\n self.display_fps()\n self.health_bar()\n self.enemy_health()\n self.energy_bar()", "def run(self):\n\n while not self.done:\n\n self.event_loop()\n\n self.update()", "def run(self):\n if not self.running:\n self.loop.run_forever()", "def _event_loop(self):\n while True:\n self.scheduler.run(blocking=True)\n time.sleep(1)", "def dispatch_loop(self):\n pass", "def run(self):\n self.connect()\n self.run_forever()", "def main_loop(self):\n # run for only the allotted time (lifetime)\n for _ in range(self.lifetime * self.ticks_per_second):\n start_time = time()\n new_message, queue_len = self.communicator.get_message()\n if new_message is None: # no incoming messages\n self.do_random_task()\n else:\n # Convert string message back into tuple of ints\n new_message = list(map(int, new_message.split('@@@')))\n self.handle_incoming_message(new_message, queue_len)\n\n # this accounts for the time already taken in test_communication\n # and other activities from the total time allotted for the loop iteration\n already_taken = time() - start_time\n sleep_time = max(1/self.ticks_per_second - already_taken, 0)\n sleep(sleep_time)", "def run(self):\n \n # Wrap the outer loop in a try block so we can do an orderly shutdown\n # should an exception occur:\n try:\n # Send out a STARTUP event:\n self.dispatchEvent(weewx.Event(weewx.STARTUP))\n \n syslog.syslog(syslog.LOG_INFO, \"engine: Starting main packet loop.\")\n\n last_gc = int(time.time())\n\n # This is the outer loop. \n while True:\n\n # See if garbage collection is scheduled:\n if int(time.time()) - last_gc > self.gc_interval:\n ngc = gc.collect()\n syslog.syslog(syslog.LOG_INFO, \"engine: garbage collected %d objects\" % ngc)\n last_gc = int(time.time())\n\n # First, let any interested services know the packet LOOP is\n # about to start\n self.dispatchEvent(weewx.Event(weewx.PRE_LOOP))\n \n # Get ready to enter the main packet loop. An exception of type\n # BreakLoop will get thrown when a service wants to break the\n # loop and interact with the console.\n try:\n \n # And this is the main packet LOOP. 
It will continuously\n # generate LOOP packets until some service breaks it by\n # throwing an exception (usually when an archive period\n # has passed).\n for packet in self.console.genLoopPackets():\n \n # Package the packet as an event, then dispatch it.\n self.dispatchEvent(weewx.Event(weewx.NEW_LOOP_PACKET, packet=packet))\n\n # Allow services to break the loop by throwing\n # an exception:\n self.dispatchEvent(weewx.Event(weewx.CHECK_LOOP, packet=packet))\n\n syslog.syslog(syslog.LOG_CRIT, \"engine: Internal error. Packet loop has exited.\")\n \n except BreakLoop:\n \n # Send out an event saying the packet LOOP is done:\n self.dispatchEvent(weewx.Event(weewx.POST_LOOP))\n\n finally:\n # The main loop has exited. Shut the engine down.\n syslog.syslog(syslog.LOG_DEBUG, \"engine: Main loop exiting. Shutting engine down.\")\n self.shutDown()", "def main() -> None:\n runner()\n asyncio.get_event_loop().run_forever()", "def main() -> None:\n runner()\n asyncio.get_event_loop().run_forever()", "def _main_loop(self):\n while not rospy.is_shutdown():\n # Check for reconfiguration data\n if self._transfer_reconfigure_data is not None:\n # Copy reconfigure data from shared memory\n with self._transfer_reconfigure_data_mutex:\n reconfigure_data = deepcopy(self._transfer_reconfigure_data)\n self._transfer_reconfigure_data = None\n # Run vision reconfiguration\n self._configure_vision(*reconfigure_data)\n # Check for new image\n elif self._transfer_image_msg is not None:\n # Copy image from shared memory\n with self._transfer_image_msg_mutex:\n image_msg = self._transfer_image_msg\n self._transfer_image_msg = None\n # Run the vision pipeline\n self._handle_image(image_msg)\n # Now the first image has been processed\n self._first_image_callback = False\n else:\n try:\n self._rate.sleep()\n except rospy.exceptions.ROSTimeMovedBackwardsException:\n pass", "def run(self): # pragma: no cover\n while True:\n self.update()", "def main_loop(self) -> None:\n while True:\n # Log a message to say that Wheatley is waiting for 'Look To!'\n self.logger.info(\"Waiting for 'Look To!'...\")\n # Sit in an infinite loop whilst we're not ringing, and exit Wheatley if enough time\n # has passed\n self._last_activity_time = time.time()\n while not self._is_ringing:\n time.sleep(0.01)\n if self._server_mode and time.time() > self._last_activity_time + INACTIVITY_EXIT_TIME:\n self.logger.info(f\"Timed out - no activity for {INACTIVITY_EXIT_TIME}s. 
Exiting.\")\n return\n\n self.logger.info(f\"Starting to ring {self.row_generator.summary_string()}\")\n if self._server_mode:\n self._tower.set_is_ringing(True)\n\n while self._is_ringing:\n self.tick()\n time.sleep(0.01)\n\n self.logger.info(\"Stopping ringing!\")\n if self._server_mode:\n self._tower.set_is_ringing(False)", "def loop(self) -> AbstractEventLoop:", "def loop(self) -> AbstractEventLoop:", "def loop(self):\r\n while self.__running:\r\n self.__check_events()\r\n self.__render()\r\n self.__reset_variables()", "def main(self):\n self.startup()\n if self.vehicle:\n try:\n while not self._loop_should_exit:\n self.tick()\n time.sleep(1)\n except KeyboardInterrupt:\n self.cleanup()\n self.cleanup()", "def run(self):\n\t\t\n\t\twhile self.update():\n\t\t\tpass", "def run_main_loop():\n mainloop = GObject.MainLoop()", "def run_main(self):\n self.addSensors()\n \n while True:\n # Leemos los sensores\n self.readSensors()\n \n # Extraemos la información a partir de los datos\n self.processData()\n \n # Actualizamos la máquina de estados a partir de la información recibida por los sensores \n self.updateFiniteStateMachine()\n \n # Calculamos las acciones que tenemos que aplicar a los distintos motores, en función del\n # estado y las lecturas de los sensores\n self.controller()\n \n # Pasamos a motores las acciones calculadas\n self.execute()\n\n # Publicamos info importante para el debug\n self.refreshUserInterface()\n \n print(self.name + \": --------------------------\")\n time.sleep(2) #!!!!!!!!!!!!!!!! ELIMINAR DELAY !!!!!!!!!!!!!!!!# ", "def main_loop(self) -> None:\n # Modify signal handlers to make sure Ctrl-C is caught and handled.\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n self._impl.main_loop()", "def run(self):\n GLib.MainLoop().run()", "def _bg_thread_main(self) -> None:\n while not self._done:\n self._run_server_cycle()", "def run(self):\n self.run()", "def run(self):\n self._setupLogger()\n self.setup()\n\n self.logger.info(self.moduleName + \" starting run loop.\")\n\n while True:\n self.loop()", "def run(self):\n self.thread = threading.Thread(target=self._main)\n self.thread.start()\n self.running = True", "def gameloop(self):\r\n\r\n # What you see above (\"\"\" some text \"\"\") is called a docstring.\r\n # It explains the purpose of the method/function.\r\n # There should generally be one for every function.\r\n\r\n\r\n # Below is the main loop\r\n while True: \r\n # One cycle in the loop is equivalent to one frame.\r\n\r\n self.event()\r\n\r\n self.draw_objects()\r\n self.move_objects()\r\n\r\n self.update_display()", "def takeControl(self):\n mainloop()", "def takeControl(self):\n mainloop()", "def mainloop(self, run, *args, **kwds):\r\n #self.m.make()\r\n self.time = 0\r\n Dummy = things(0,0,0)\r\n Dummy.threadqueue.append(-1)\r\n Dummy.s=self\r\n Dummy.name = 'End of simulation.'\r\n heappush(self.queue, (self.timeout, (Dummy, 1000)))\r\n #count = 0\r\n while self.time < self.timeout:\r\n run(*args, **kwds)\r\n (now, (item, i)) = heappop(self.queue)\r\n self.CalculateResourse(now)\r\n self.time = now\r\n item.creat()\r\n #count += 1\r\n #print 'Number of loops' , count\r", "def run(self):\n ioloop.IOLoop.current().start()", "def run(self):\n self.__power_on()\n\n self.__main()", "def MainLoop(self):\n self.pleaseQuit=0\n\n self.logger.info(\"Starting main eventloop\")\n try:\n self.irc.process_forever(1)\n except KeyboardInterrupt:\n self.logger.warn(\"Received interrupt, disconnecting from irc\")\n #self.irc.disconnect_all(\"^C received\")\n 
self.irc.disconnect_all(\"even de suiker bijvullen\")\n \n self.logger.info(\"Finished disconnecting, shutting down\")", "def _run_loop(self):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n self._server = websockets.serve(self._log_message, self._host, self._port)\n\n loop.run_until_complete(self._server)\n loop.run_forever()", "def _start_loop(self):\n self.p = tread.Thread(target=self._loop)\n self.p.start()", "def _run_cycle(self):\n pass", "def mainloop(self):\n self.app.mainloop()", "def postloop(self):\n print 'Bye!'", "def main():\r\n gameclass = data.game.GameClass()\r\n gameclass.main_loop()", "def loop_run(self):\n self.log_debug(\"Running loop\")\n import cothread\n self.cothread = cothread\n self._loop_state = LState.Running\n if self.loop_event:\n # Call unbound function with a weak reference to self so that\n # garbage collector will call __del__ when we finish\n event_loop = weak_method(self.event_loop)\n loop_event = weak_method(self.loop_event)\n self.event_loop_proc = cothread.Spawn(event_loop, loop_event)\n else:\n self.event_loop_proc = cothread.Pulse()", "def Run():\r\n pass", "def main_loop(self):\n LOGGER.info('Entering main event loop...')\n try:\n while self._handle_faucet_events():\n while not self._faucet_events.event_socket_connected:\n LOGGER.info('Attempting faucet event sock connection...')\n time.sleep(1)\n try:\n self._faucet_events.connect()\n self._restore_states()\n self._faucet_collector.set_state_restored(True)\n except Exception as e:\n LOGGER.error(\"Cannot restore states or connect to faucet: %s\", e)\n self._faucet_collector.set_state_restored(False, e)\n except KeyboardInterrupt:\n LOGGER.info('Keyboard interrupt. Exiting.')\n self._faucet_events.disconnect()\n except Exception as e:\n LOGGER.error(\"Exception: %s\", e)\n raise", "def run(self):\n while True:\n self.sm.run()\n time.sleep(0.05)", "def run(self):\n while self.container.process(): pass", "def loop(self):\n while not self.should_exit:\n self._run_once()\n\n self.on_exit()", "def mainloop(self):\n self.root.mainloop()", "def mainloop(self):\n self.root.mainloop()", "def run_forever(self):\n self._loop.run_until_complete(self._loop_body())", "def run(self):\n self.loop.spawn_callback(self.main)\n self.loop.start()\n if self.exc_info:\n six.reraise(*self.exc_info)", "def mainloop(self):\n self.master.mainloop()", "def main(self,Surf):\n while True:\n if self.state == \"GAME\":\n self.event_loop()\n self.update(Surf)\n elif self.state == \"QUIT\":\n break\n pg.display.update()\n self.Clock.tick(65)", "def main_loop():\n while len(fake_threads) > 0:\n pulse(0.1)", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def main_loop(self):\n while self.game_manager.game_state != GameState.Quit:\n\n self.handle_events()\n self.handle_ui_response()\n #in menu\n if self.game_manager.game_state == GameState.Menu: \n self.display.clear()\n\n #in game\n elif self.game_manager.game_state == GameState.Running:\n self.game_manager.move_players()\n\n #after game\n elif self.game_manager.game_state == GameState.Finished:\n if self.game_manager.winner == None:\n self.game_manager.player1.decay()\n self.game_manager.player2.decay() \n else:\n 
self.game_manager.loser.decay()\n self.game_manager.loser.draw()\n\n #perform game manager actions\n self.game_manager.act()\n #do all the rendering stuff\n self.render_scene()\n #control FPS\n self.clock.tick(self.FPS)", "def run(self):\r\n pass", "def _main_loop(self):\n observer = Observer()\n observer.schedule(self.changes_event_handler, path=self.base_dir, recursive=False)\n observer.start()\n while True:\n if os.path.exists(self.todo_local_file):\n with open(self.todo_local_file, 'rb') as f:\n obj_list = pickle.load(f)\n\n today_todo_list = [i for i in obj_list if self.is_today_todo(i['time'])]\n self.solve_one_day_todo_events(todo_items_list=today_todo_list)\n else:\n time.sleep(60)\n pass", "def Listen(self):\n while True:\n time.sleep(1)", "def main():\n BouncyGUI().mainloop()", "def Gameloop():", "def run(self):\n try:\n while self._running:\n time.sleep(1)\n finally:\n self._exit()", "def run(self):\n if self._main_loop:\n return\n self._main_loop = GObject.MainLoop()\n self._disconnect_all()\n self._register()\n logger.info(\"--- Mainloop started ---\")\n logger.info(\"Hub is ready for onboarding\")\n try:\n self._main_loop.run()\n except KeyboardInterrupt:\n # ignore exception as it is a valid way to exit the program\n # and skip to finally clause\n pass\n except Exception as e:\n logger.error(e)\n finally:\n logger.info(\"--- Mainloop finished ---\")\n self._unregister()\n self._main_loop.quit()\n self._main_loop = None", "def run(self):\n\t\t\n\t\tpass", "def run(self):\r\n self.rpc_server.serve_forever(0.5)", "def loop(self):\n keys.mode = 'main'\n for line in client.readlines('/event'):\n if not self.alive:\n break\n self.dispatch(*line.split(' ', 1))\n self.alive = False", "def main_loop(self):\n import time\n while not self.ask_for_stop:\n self.run_pending()\n time.sleep(self.delay)\n # FIXME this will look at self.ask_for_stop only every self.delay seconds\n # see https://stackoverflow.com/questions/5114292/break-interrupt-a-time-sleep-in-python", "def mainloop(self):\n\t\tself.root.after(100, self.tkloop)\n\t\tself.root.mainloop()", "def run_forever(self):\n asyncio.run(self._loop_body())", "def game_loop(self):\n self.interface.game_loop(self)", "def main_loop(self):\n try:\n self.state_machine.set_state('wait')\n\n while True:\n events = list(reversed(pygame.event.get())) # Take all events, most recent first\n\n if self.find_quit_event(events):\n break\n\n if self.find_fullscreen_event(events):\n self.window.toggle_fullscreen()\n\n event = self.find_resize_event(events)\n if event:\n self.window.resize(event.size)\n\n self.state_machine.process(events)\n\n finally:\n self.led_picture.quit()\n self.led_print.quit()\n GPIO.cleanup()\n self.camera.quit()\n self.printer.quit()\n pygame.quit()", "def run(self):\n self.arbiter.start()", "def _main(self):\n run = True\n\n while run:\n if self._do_iteration():\n self.delay_multiplier = 1\n self.iterations_made += 1\n else: # Spaces between iterations will get bigger and bigger as they fail\n self.delay_multiplier += self.DELAY_MULTIPLIER_ON_FAIL\n\n self.seconds_remaining = self._get_delay()\n while self.seconds_remaining > 0: # Check kill flag every second\n sleep(1)\n if self._kill_thread:\n self._kill_thread = False\n run = False\n break\n self.seconds_remaining -= 1", "def run(self):\n while True:\n display(self.world.draw())\n self.read_and_process_input()", "def run(self):\n if self.okay:\n ExtLoopWin32.run()", "def __loop(self):\n\n self.__update_table()\n self.__update_labels()\n if self.remote_stop:\n 
self.__stop(\"remote telegram admin\")\n else:\n self.__main_window.after(1000, self.__loop)", "def loop_start( self ):\n self.client.loop_start()", "def run(self):\n while True:\n print(\"I'm running in the background\")\n time.sleep(self.interval)", "def run(self):\n\n self._daemon_thread.start()\n\n while True:\n time.sleep(5)" ]
[ "0.8346334", "0.8201697", "0.80821764", "0.7956556", "0.7799716", "0.7637182", "0.75982195", "0.75860673", "0.75406194", "0.7487311", "0.7481487", "0.7428722", "0.73967844", "0.7389132", "0.73753315", "0.7373896", "0.7358908", "0.73579377", "0.73549044", "0.73549044", "0.73533237", "0.7348693", "0.7297625", "0.7292823", "0.7292823", "0.7291617", "0.72698355", "0.72682005", "0.72556686", "0.7246061", "0.72399473", "0.72288966", "0.7223857", "0.7193984", "0.719256", "0.7182222", "0.7177106", "0.717074", "0.717074", "0.7169555", "0.7164605", "0.7149335", "0.7147554", "0.714085", "0.71359116", "0.7115755", "0.7098985", "0.70558286", "0.7046971", "0.7035441", "0.70327246", "0.7013264", "0.7009958", "0.700928", "0.6991084", "0.6986977", "0.6986977", "0.69855404", "0.6977226", "0.6956449", "0.6944313", "0.6941006", "0.6935713", "0.6935713", "0.6935713", "0.6935713", "0.6935713", "0.6935713", "0.6935713", "0.6935713", "0.6935713", "0.6935713", "0.6935713", "0.6935713", "0.6935713", "0.6935713", "0.6935713", "0.69241893", "0.69104284", "0.69098556", "0.6902049", "0.69014406", "0.6896176", "0.687799", "0.68604994", "0.68588895", "0.68563074", "0.6855354", "0.68463224", "0.6834084", "0.6822455", "0.68221766", "0.6818641", "0.68165606", "0.6816014", "0.6810499", "0.6803627", "0.6799087", "0.6794546", "0.67840236", "0.67721015" ]
0.0
-1
Add padding for unet of given depth
def _pad(x, depth=4): divisor = np.power(2, depth) remainder = x.shape[0] % divisor # no padding because already of even shape if remainder == 0: return x # add zero rows after 1D feature elif len(x.shape) == 2: return np.pad(x, [(0, divisor - remainder), (0, 0)], "constant") # add zero columns and rows after 2D feature elif len(x.shape) == 3: return np.pad(x, [(0, divisor - remainder), (0, divisor - remainder), (0, 0)], "constant")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def space_to_depth_fixed_padding(inputs, kernel_size,\n data_format='channels_last', block_size=2):\n pad_total = kernel_size - 1\n pad_beg = (pad_total // 2 + 1) // block_size\n pad_end = (pad_total // 2) // block_size\n return _padding(inputs, (pad_beg, pad_end), data_format)", "def padding_depth(self):\n\t\treturn self.paddings_shape_param('D')", "def zero_pad_features(features, depth):\n\n n = int(features.get_shape().dims[-1])\n extra_feature_count = depth - n\n assert n >= 0\n if n > 0:\n padding = tf.tile(features[:, :, :, :1] * 0,\n [1, 1, 1, extra_feature_count])\n features = tf.concat([features, padding], 3)\n return features", "def make_padding(kernel_size, stride, dilation):\n return -((-kernel_size - (kernel_size - 1) * (dilation - 1)) // stride + 1) // 2", "def temporal_padding(x, padding=(1, 1)):\n assert len(padding) == 2\n pattern = [[0, 0], [padding[0], padding[1]], [0, 0]]\n return tf.pad(x, pattern)", "def pad_upper(self, data, options, padding):\n # data, options = nrrd.read(input_file_name)\n rows, columns, depths = data.shape\n\n # numpy.fill\n for i in range(padding):\n padding_layer = [[self.AIR] * columns for j in range(rows)]\n data = self.concatenate_layers(data, padding_layer)\n\n options['sizes'][2] += padding # update depths\n return (data, options)", "def pad(x, system_shape, pad_size):\n res = unpad(tf.tile(x, (1,)+(3,)*len(pad_size)),\n tuple(s-p for s, p in zip(system_shape, pad_size)))\n return res", "def _zero_pad(self, kernel, size):\n if len(size) != kernel.ndim:\n size = kernel.shape[:1] + tuple(size) + kernel.shape[-1:]\n padsize = np.array(size) - np.array(kernel.shape)\n paddown = padsize // 2\n padup = padsize - paddown\n padarray = np.concatenate((padup[..., None],\n paddown[..., None]), axis=1)\n pads = tuple([tuple(p) for p in padarray])\n kernel_pad = np.pad(kernel, pads, 'constant', constant_values=0)\n return kernel_pad", "def pad(x, padding, fill_value=0):\n input_shape = x.shape\n output_shape = []\n indices = []\n\n for dim, pad in enumerate(padding):\n try:\n left_pad, right_pad = pad\n except TypeError:\n left_pad = right_pad = pad\n output_shape.append(left_pad + input_shape[dim] + right_pad)\n indices.append(slice(left_pad, left_pad + input_shape[dim]))\n\n if fill_value:\n out = T.ones(output_shape) * fill_value\n else:\n out = T.zeros(output_shape)\n return T.set_subtensor(out[tuple(indices)], x)", "def _prepare_onnx_paddings__tensorrt(g, input, pad):\n ctx = FUNCTION_REWRITER.get_context()\n torch_version = version_parse(torch.__version__)\n if torch_version.major == 1 and torch_version.minor < 10:\n return ctx.origin_func(g, input, pad)\n # The desired order of paddings is\n # dim_0_begin, dim_1_begin, ... , dim_0_end, ..., dim_n_end.\n # n is the dimension of input.\n # Assume zero-dimensions in the beginning, pad the \"pad\" sequence with\n # zeros in the beginning\n pad_len = torch.onnx.symbolic_opset9.size(\n g, pad, g.op('Constant', value_t=torch.tensor([0])))\n # Set extension = [0] * (dim * 2 - len(pad))\n rank = sym_help._get_tensor_rank(input)\n if rank is None:\n rank = g.op('Size', g.op('Shape', input))\n else:\n rank = g.op('Constant', value_t=torch.tensor(rank, dtype=torch.int64))\n extension = g.op(\n 'Sub',\n g.op('Mul', rank,\n g.op('Constant', value_t=torch.tensor(2, dtype=torch.int64))),\n pad_len)\n # Concat pad with extension: paddings = [dim_n_begin, dim_n_end,\n # dim_n-1_begin, dim_n-1_end, 0, 0, ... 
]\n # Currently ONNX only supports int64 type for Pad\n pad = g.op('Cast', pad, to_i=sym_help.cast_pytorch_to_onnx['Long'])\n paddings = g.op(\n 'Concat',\n pad,\n g.op(\n 'ConstantOfShape',\n extension,\n value_t=torch.tensor([0], dtype=torch.int64)),\n axis_i=0)\n # Reshape and reverse order and collate first beginnings and then ends\n # paddings = [[..., 0, dim_n-1_begin, dim_n_begin],\n # [..., 0, dim_n-1_end, dim_n_end]]\n # Reshape back to 1-D paddings = [..., 0, dim_n - 1_begin, dim_n_begin,\n # ..., 0, dim_n - 1_end, dim_n_end]\n\n # replace original Constant-Transpose-Constant with Slices and Concat.\n paddings = torch.onnx.symbolic_opset10.flip(g, paddings, [0])\n begins = sym_help._slice_helper(\n g, paddings, axes=[0], starts=[1], ends=[0xffff], steps=[2])\n ends = sym_help._slice_helper(\n g, paddings, axes=[0], starts=[0], ends=[0xffff], steps=[2])\n paddings = g.op('Concat', begins, ends, axis_i=0)\n padding_c = g.op(\n 'Cast', paddings, to_i=sym_help.cast_pytorch_to_onnx['Long'])\n return padding_c", "def convert_padding(g, op, block):\n\n input_x = g.get_node(op.input(\"X\")[0])\n input_padding = op.input(\"Paddings\")\n if input_padding:\n padding = g.get_node(input_padding[0])\n padding = infer_value(padding, g.get_params()).numpy().tolist()\n else:\n padding = op.attr(\"paddings\")\n padding = op.attr(\"paddings\")\n value = op.attr(\"value\")\n data_format = op.attr(\"data_format\")\n mode = op.attr(\"mode\")\n assert mode != \"circular\", \"Don't support mod='circular' for PaddlePaddle's padding\"\n if mode == \"replicate\":\n mode = \"edge\"\n\n pad_len = len(padding)\n new_paddings = [0] * (pad_len + 4)\n for i in range(0, pad_len, 2):\n index = -1 - i\n if data_format[:2] != \"NC\":\n index = -3 - i\n new_paddings[index] = padding[i + 1]\n new_paddings[index - 1] = padding[i]\n\n new_paddings = [new_paddings[i : i + 2] for i in range(0, len(new_paddings), 2)]\n\n out = _op.nn.pad(input_x, new_paddings, pad_value=value, pad_mode=mode)\n g.add_node(op.output(\"Out\")[0], out)", "def pad(input, pad_size):\n if not pad_size:\n return input\n return tf.pad(input, [[0,0],[pad_size, pad_size],[pad_size, pad_size],[0,0]], 'REFLECT')", "def fixed_padding(inputs, kernel_size, data_format='channels_last'):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n return _padding(inputs, (pad_beg, pad_end), data_format)", "def fixed_padding_2d3d(self, inputs, kernel_size):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n if self.data_format == 'channels_first':\n if len(inputs.shape)==4:\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],\n [pad_beg, pad_end], [pad_beg, pad_end]])\n elif len(inputs.shape)==5:\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],\n [pad_beg, pad_end], [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n if len(inputs.shape)==4:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]])\n elif len(inputs.shape)==5:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])\n return padded_inputs", "def _fixed_padding(inputs, kernel_size, *args, mode='CONSTANT', **kwargs):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]], mode=mode)\n return padded_inputs", "def _build(layer, height):\n if len(layer) == 1:\n return layer\n odd = None\n if len(layer) % 2:\n # 
promote to higher level\n odd = layer.pop(-1)\n # layer.append(layer[-1])\n new_layer = []\n for idx in range(0, len(layer), 2):\n node = Node(layer[idx].val + layer[idx + 1].val)\n node.h = height + 1\n node.l, node.r = layer[idx], layer[idx + 1]\n layer[idx].p, layer[idx + 1].p = node, node\n new_layer.append(node)\n if odd:\n odd.h += 1\n new_layer.append(odd)\n return new_layer", "def _pad1d(self, x: torch.Tensor, padding_left: int, padding_right: int, mode: str = \"zero\", value: float = 0.0):\n length = x.shape[-1]\n if mode == \"reflect\":\n max_pad = max(padding_left, padding_right)\n if length <= max_pad:\n x = F.pad(x, (0, max_pad - length + 1))\n return F.pad(x, (padding_left, padding_right), mode, value)", "def pad_graph(graph_dict, n_graphs_post_padding, n_nodes_post_padding, n_edges_post_padding):\n node_graph_idx = np.zeros(n_nodes_post_padding)\n node_graph_idx[:len(graph_dict['node_graph_idx'])] = graph_dict['node_graph_idx']\n graph_dict['node_graph_idx'] = node_graph_idx\n\n node_features = np.concatenate(graph_dict['node_features'])\n padded_node_features = np.zeros([n_nodes_post_padding, node_features.shape[1]],\n dtype=node_features.dtype)\n padded_node_features[:len(node_features), :] = node_features\n graph_dict['node_features'] = padded_node_features\n\n edge_graph_idx = np.zeros(n_edges_post_padding)\n edge_graph_idx[:len(graph_dict['edge_graph_idx'])] = graph_dict['edge_graph_idx']\n graph_dict['edge_graph_idx'] = edge_graph_idx\n\n edge_features = np.concatenate(graph_dict['edge_features'])\n padded_edge_features = np.zeros([n_edges_post_padding, edge_features.shape[1]],\n dtype=edge_features.dtype)\n padded_edge_features[:len(edge_features), :] = edge_features\n graph_dict['edge_features'] = padded_edge_features\n\n edge_idx_padding = np.zeros(shape=[2, n_edges_post_padding - len(edge_features)], dtype=np.int32)\n # transpose so shape is [n_edge, 2]\n graph_dict['edge_idx'] = np.concatenate(graph_dict['edge_idx'] + [edge_idx_padding], axis=1).T\n\n labels_array = -np.ones([n_graphs_post_padding], dtype=np.int32)\n labels_array[:len(graph_dict['labels'])] = graph_dict['labels']\n graph_dict['labels'] = labels_array\n return graph_dict", "def pad( padNumber, ant, subarray=DEFAULT):\n multiSubarray('pad', subarray, padNumber, ant)", "def insert_padding(img, pad_h, pad_w):\n global frame_height, frame_width\n padding_3_dims = ((pad_h, pad_h), (pad_w, pad_w), (0, 0))\n # apply padding in the above dimensions with values 0\n padded_img = numpy.pad(img, padding_3_dims, 'constant', constant_values=0)\n return padded_img", "def pad_edges(self, pad):\n weights=[]\n for dim, xy in zip([0, 1], [self.x, self.y]):\n xy0 = np.mean(xy)\n W = xy[-1]-xy[0]\n dist = np.abs(xy-xy0)\n wt=np.ones_like(dist)\n wt[ dist >= W/2 - pad] = 0\n weights += [wt]\n self.weight *= weights[0][:,None].dot(weights[1][None,:])", "def _padding(inputs, paddings, data_format):\n if data_format == 'channels_first':\n padded_inputs = tf.pad(\n inputs, [[0, 0], [0, 0], paddings, paddings])\n else:\n padded_inputs = tf.pad(\n inputs, [[0, 0], paddings, paddings, [0, 0]])\n return padded_inputs", "def spatial_reflection_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):\n assert len(padding) == 2\n assert len(padding[0]) == 2\n assert len(padding[1]) == 2\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format ' + str(data_format))\n\n if data_format == 'channels_first':\n pattern = [[0, 
0],\n [0, 0],\n list(padding[0]),\n list(padding[1])]\n else:\n pattern = [[0, 0],\n list(padding[0]), list(padding[1]),\n [0, 0]]\n return tf.pad(x, pattern, \"REFLECT\")", "def padding(old, l):\n new = deepcopy(old)\n for i, j in enumerate(new):\n new[i] += [0] * (l - len(j))\n new[i] = j[:l]\n return new", "def add_padding(img, x_padding):\n w = img.shape[1] + x_padding * 2\n img_with_padding = np.zeros((img.shape[0], w, 3), dtype=img.dtype)\n img_with_padding[:, x_padding:img.shape[1] + x_padding] = img\n return img_with_padding", "def get_paddings(self):\n return tf.constant([[0, 0,],\n [self._settings.half_patch_size, self._settings.half_patch_size],\n [self._settings.half_patch_size, self._settings.half_patch_size],\n [0, 0]])", "def spatial_reflection_2d_padding(x, padding=((1, 1), (1, 1)),\n data_format=None):\n assert len(padding) == 2\n assert len(padding[0]) == 2\n assert len(padding[1]) == 2\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format ' + str(data_format))\n\n if data_format == 'channels_first':\n pattern = [[0, 0],\n [0, 0],\n list(padding[0]),\n list(padding[1])]\n else:\n pattern = [[0, 0],\n list(padding[0]), list(padding[1]),\n [0, 0]]\n return tf.pad(x, pattern, \"REFLECT\")", "def fixed_padding(inputs, kernel_size, data_format):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n if data_format == 'channels_first':\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])\n\n return padded_inputs", "def proper_padding(self, prediction, k_space_slice):\n h = prediction.shape[-3]\n w = prediction.shape[-2]\n w_pad = (k_space_slice.shape[-2] - w) // 2\n h_pad = (k_space_slice.shape[-3]-h) // 2\n return torch.nn.functional.pad(prediction, (0,0,w_pad,w_pad,h_pad,h_pad), \"constant\", 0)", "def padding(img, n):\n img = np.pad(img, [(n, n), (n, n)], mode='constant', constant_values=0)\n\n return img", "def _get_padding(w, h):\n dim_diff = np.abs(h - w)\n pad1, pad2 = dim_diff // 2, dim_diff - dim_diff // 2\n return (0, pad1, 0, pad2) if h <= w else (pad1, 0, pad2, 0)", "def _fixed_padding(inputs, kernel_size, rate=1):\n kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),\n kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)]\n pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]\n pad_beg = [pad_total[0] // 2, pad_total[1] // 2]\n pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]],\n [pad_beg[1], pad_end[1]], [0, 0]])\n return padded_inputs", "def fixed_padding(inputs, kernel_size, rate=1):\n kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)\n pad_total = kernel_size_effective - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]])\n return padded_inputs", "def padding_width(self):\n ...", "def padding(self):\n\t\treturn self.paddings_shape_param('W')", "def fixed_padding(inputs, kernel_size, data_format='channels_first'):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n if data_format == 'channels_first':\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],\n [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n 
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]])\n\n return padded_inputs", "def _fixed_padding(inputs, kernel_size, *args, mode='CONSTANT', **kwargs):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n if kwargs['data_format'] == 'NCHW':\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],\n [pad_beg, pad_end],\n [pad_beg, pad_end]],\n mode=mode)\n else:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]], mode=mode)\n return padded_inputs", "def pad(size, value):\n return (value + size - 1)/size*size", "def fixed_padding(inputs, kernel_size, data_format):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n if data_format == 'channels_first':\n padded_inputs = tf.pad(tensor=inputs,\n paddings=[[0, 0], [0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end]])\n else:\n padded_inputs = tf.pad(tensor=inputs,\n paddings=[[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]])\n return padded_inputs", "def build(self):\n pad_size_tmp = list(self.pad_size)\n\n # This handles the case where the padding is equal to the image size\n if pad_size_tmp[0] == self.input_size[0]:\n pad_size_tmp[0] -= 1\n pad_size_tmp[1] -= 1\n if pad_size_tmp[2] == self.input_size[1]:\n pad_size_tmp[2] -= 1\n pad_size_tmp[3] -= 1\n # Pytorch expects its padding as [left, right, top, bottom]\n self.padding_module = ReflectionPad2d([pad_size_tmp[2], pad_size_tmp[3],\n pad_size_tmp[0], pad_size_tmp[1]])", "def pad(tensor):\n paddings = tf.constant([[0, 0], [0, 10], [0, 0]])\n out = tf.pad(tensor, paddings, \"CONSTANT\", constant_values=0)\n return out", "def conv_pad(x, ks, mode):\n\tpad = (int(np.floor((ks-1)/2)), int(np.ceil((ks-1)/2)))\n\treturn F.pad(x, (*pad, *pad), mode=mode)", "def reflection_pad(images, filter_size):\n num = filter_size // 2\n return tf.pad(images, [[0, 0], [num, num], [num, num], [0, 0]], mode='REFLECT')", "def add_padding(im, pad):\n\n return np.pad(im, pad_width=((pad, pad), (pad, pad), (0, 0)), mode='symmetric')", "def padding(a, dim):\n\n return np.pad(a, (0, dim-len(a)), 'constant', constant_values=(0))", "def _padding(self, x, shape, value=0):\n row_padding = shape[0] - x.shape[0]\n col_padding = shape[1] - x.shape[1]\n return np.pad(x, [[0, row_padding], [0, col_padding]], mode=\"constant\", constant_values=value)", "def transform_padding(pad_width):\n num_pad_values = len(pad_width)\n onnx_pad_width = [0]*num_pad_values\n\n start_index = 0\n # num_pad_values will always be multiple of 2\n end_index = int(num_pad_values/2)\n for idx in range(0, num_pad_values):\n if idx % 2 == 0:\n onnx_pad_width[start_index] = pad_width[idx]\n start_index += 1\n else:\n onnx_pad_width[end_index] = pad_width[idx]\n end_index += 1\n\n return onnx_pad_width", "def add_padding(x, maxlen=500):\n \n # May want to increase maxlen from 500! 
Not sure the total dist of chomragram lengths.\n\n for i in range(len(x)):\n x[i] = x[i][:,:maxlen]\n q = maxlen - x[i].shape[1]\n p = q//2\n# if q % 2 == 0:\n# x[i] = np.pad(x[i], ((p,p), (0,0)), 'constant', constant_values=(0,0))\n# else:\n# x[i] = np.pad(x[i], ((p,p+1), (0,0)), 'constant', constant_values=(0,0))\n\n print\n if q % 2 == 0:\n x[i] = np.pad(x[i], ((0,0), (p,p)), 'constant', constant_values=(0,0))\n else:\n x[i] = np.pad(x[i], ((0,0), (p,p+1)), 'constant', constant_values=(0,0))\n \n return x", "def fixed_padding(inputs, kernel_size, data_format):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n if data_format == 'channels_first':\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],\n [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]])\n return padded_inputs", "def embedding_to_padding(emb):\n emb_sum = tf.reduce_sum(tf.abs(emb), axis=-1)\n return tf.to_float(tf.equal(emb_sum, 0.0))", "def fixed_padding(inputs, kernel_size, data_format):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n if data_format == 'channels_first':\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],\n [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]])\n return padded_inputs", "def naive_block_padding(b: bytes, size: int) -> bytes:\n assert size <= 0xff\n\n l = len(b)\n if l > 0 and l % size == 0:\n return b\n\n return b + b'\\x00' * (size - (l % size))", "def zero_pad_channels(x, pad=0):\n pattern = [[0, 0], [0, 0], [0, 0], [pad - pad // 2, pad // 2]]\n return tf.pad(x, pattern)", "def pad(self, src):\n if(self.pre_pad):\n dst = src.new(\n src.size(0),\n src.size(1),\n src.size(2),\n src.size(3),\n 2\n ).zero_()\n dst.narrow(dst.ndimension()-1, 0, 1).copy_(\n torch.unsqueeze(src, 4)\n )\n else:\n padded = self.padding_module.updateOutput(src)\n dst = src.new(\n padded.size(0),\n padded.size(1),\n padded.size(2),\n padded.size(3),\n 2\n ).zero_()\n dst.narrow(4, 0, 1).copy_(\n torch.unsqueeze(padded, 4)\n )\n return dst", "def padding(image, padded_size):\n image_row, image_col = image.shape #asigna alto y ancho de la imagen \n\n padded_image = np.zeros((image_row + padded_size*2, image_col + padded_size*2)) #matriz de imagen con padding en zeros\n print(\"Padded image zeros:\")\n print(padded_image)\n\n padded_image[padded_size:padded_size + image_row, padded_size:padded_size + image_col] = image #matriz de imagen con padding\n print(\"Padded image:\")\n print(padded_image)\n\n \n return padded_image", "def right_padding_width(self):\n ...", "def compute_padding(size, factor):\n if size % factor == 0 and (size/factor) % 2 == 0:\n p = 0\n else:\n p = 1\n while (size + p) % factor != 0 or ((size + p) / factor) % 2 !=0:\n p += 1\n return p", "def pad(tensor, num=1):\n return tf.pad(tensor, [[0, 0], [num, num], [num, num], [0, 0]], \"CONSTANT\")", "def padding(self):\n pad = self.ntiles - self.windowsize\n return (int((pad - 1)/2.), int((pad + 1)/2.))", "def channel_padding(x):\n #keras.backend.concatenate([x, tf.zeros_like(x)], axis=-1)\n x0=keras.layers.Activation('sigmoid')(x)\n return keras.backend.concatenate([x, x0], axis=-1)", "def set_depth(node, depth):\n setattr(node[0], \"depth\", depth)", "def pad_node_id(node_id: np.uint64) -> str:\n return \"%.20d\" % node_id", "def test_pad6():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 
6.0]]]]])\n pad = (2, 1, 3, 0, 2, 0)\n mode = \"replicate\"\n data_format = \"NDHWC\"\n res = np.array(\n [\n [\n [\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n ],\n [\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n ],\n [\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n ],\n ]\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, data_format=data_format)", "def _pad_traces(self, X):\n # Padding is done using using keras.preprocessing.sequence.pad_sequences(sequences, ...)\n # maxlen: Int, maximum length of all sequences.\n # truncating='pre' remove values at the beginning from sequences larger than maxlen\n # padding='pre' pads each trace at the beginning with a special integer (e.g., 0)\n X = pad_sequences(X, maxlen=self.trace_size, dtype=np.int16,\n truncating='pre', padding='pre', value=DEFAULTS['padding_symbol'])\n\n print(X)\n # Shape X is (n_traces, self.trace_size)\n # e.g., (25002, 20)\n\n return X", "def pad(msg):\n return msg + (BLOCK_SIZE - len(msg)) * PADDING", "def test_Pad3D7():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)", "def calculate_padding_to_align(length, align):\n return 0 if length % align == 0 else (align - (length % align))", "def increment_depth(self):\r\n self.depth = self.depth + 1", "def zero_pad(X, padding_width, dims):\n dims = (dims) if isinstance(dims, int) else dims\n pad = [(0, 0) if idx not in dims else (padding_width, padding_width)\n for idx in range(len(X.shape))]\n X_padded = np.pad(X, pad, 'constant')\n return X_padded", "def pad_zeros(x):\n dim = tf.shape(x)[0]\n log2_dim = tf.math.log(tf.cast(dim, tf.float32)) / tf.math.log(2.0)\n pad_dim = tf.pow(2, tf.cast(tf.math.ceil(log2_dim), tf.int32))\n with tf.control_dependencies([tf.debugging.assert_rank(x, 1)]):\n return tf.pad(x, [[0, tf.maximum(0, pad_dim - dim)]])", "def padOffset(east, north, up, ant, subarray=DEFAULT):\n multiSubarray('padOffset', subarray, east, north, up, ant)", "def __init__(self, padding, padding_mode, **kwargs):\n self._padding = padding\n self._padding_mode = padding_mode\n super(Pad1D, self).__init__(**kwargs)", "def pad(img: torch.Tensor, new_size: Union[int, Tuple[int, int]]) ->torch.Tensor:\n new_size = to_tuple(new_size)\n old_size = img.shape[-2:]\n pad_size = (torch.tensor(new_size) - torch.tensor(old_size)) / 2\n padding = torch.cat((torch.floor(pad_size), torch.ceil(pad_size)))\n padding[padding < 0] = 0\n padding = [int(x) for x in padding]\n 
return F.pad(img, padding=padding, padding_mode='edge')", "def wrap_pad(input, size):\n M1 = tf.concat([input[:, :, -size[1]:, :], input, input[:, :, 0:size[1], :]], 2)\n M1 = tf.concat([M1[:, -size[0]:, :, :], M1, M1[:, 0:size[0], :, :]], 1)\n return M1", "def _add_padding(input_str):\r\n padding_len = AES.block_size - len(input_str) % AES.block_size\r\n return input_str + padding_len * chr(padding_len)", "def stack(T, ks, stride, padding=False):\n\t# (B,C,H,W) -> unfold (B,C,I,J,ks,ks) -> permute (B,I,J,C,ks,ks)\n\treturn T.unfold(2,ks,stride).unfold(3,ks,stride).permute(0,2,3,1,4,5)", "def on_depth_image(self, depth_image):\n depth_image = depth_image.copy()\n mask = np.where(self.depth != 0)\n depth_image[mask] = self.depth[mask]\n return depth_image", "def pad(self) -> dict:\n raise NotImplementedError", "def pad(input, pad, mode='constant', value=0):\n ndim = input.ndimension()\n pads_begin, pads_end = [0] * ndim, [0] * ndim\n for i in range(len(pad) // 2):\n pads_begin[ndim - 1 - i] = pad[i * 2]\n pads_end[ndim - 1 - i] = pad[i * 2 + 1]\n mode = {'constant': 'CONSTANT', 'reflect': 'REFLECT',\n 'replicate': 'EDGE', 'circular': 'EDGE'}[mode]\n return FunctionLib.apply(\n 'Pad', input.device, [input], mode=mode, value=float(value),\n ndim=ndim, pads=pads_begin + pads_end)", "def log_depth(key, prefix, batch, i=0):\n depth = batch[key] if is_dict(batch) else batch\n inv_depth = 1. / depth[i]\n inv_depth[depth[i] == 0] = 0\n return prep_image(prefix, key,\n viz_inv_depth(inv_depth, filter_zeros=True))", "def _do_adaptive_padding(self, im):\n im_sz = list(im.shape)\n dim = len(im_sz)\n dim_to_pad = [dim_sz%self.adaptive_padding!=0 and dim_sz>3 for dim_sz in im_sz]\n dim_rem = [dim_sz//self.adaptive_padding for dim_sz in im_sz]\n new_dim_sz = [(dim_rem[i]+1)*self.adaptive_padding if dim_to_pad[i] else im_sz[i] for i in range(dim)]\n before_id = [(new_dim_sz[i] -im_sz[i]+1)//2 for i in range(dim)]\n after_id = [new_dim_sz[i] - im_sz[i] - before_id[i] for i in range(dim)]\n padding_loc = tuple([(before_id[i],after_id[i]) for i in range(dim)])\n new_img = np.lib.pad(im, padding_loc, 'edge')\n return new_img", "def _add_padding(self, instance):\n bit_length = (len(hex(instance)) - 2) * 4\n desired_padding_size = self.desired_instance_bits - bit_length\n padding = (2 ** desired_padding_size) - 1\n return self._append_hex(padding, instance)", "def padding(src, min_size):\n # pad before put into convolutional layer\n src_dim = src.dim()\n if src_dim[0][1] >= min_size:\n return src\n pad_size = min_size - src_dim[0][1]\n channels = src_dim[0][2] if len(src_dim[0]) >= 3 else 1\n if pad_size == 1:\n return dy.concatenate([src, dy.zeroes((src_dim[0][0], 1, channels))], d=1)\n else:\n left_border = int(pad_size) / 2\n right_border = (int(pad_size)+1) / 2\n return dy.concatenate([dy.zeroes((src_dim[0][0], left_border, channels)), src, dy.zeroes((src_dim[0][0], right_border, channels))], d=1) # do concatenate along cols", "def __Pad(self, data):\n pad = self.block_size - len(data) % self.block_size\n return data + pad * chr(pad)", "def fixed_padding(inputs, kernel_size, data_format):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n if is_NHWC(data_format):\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]])\n else:\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],\n [pad_beg, pad_end], [pad_beg, pad_end]])\n return padded_inputs", "def hook(module, input):\n image_dimensions = input[0].size()[-2:]\n module.padding = 
_determine_inverse_padding_from_tf_same(image_dimensions, kernel_size, stride)", "def test_pad_8():\n paddle.disable_static()\n x = np.array([[[[1.0, 3.0], [-3.0, 1.0]]]])\n pad = [1, 1, 1, 2]\n mode = \"constant\"\n value = np.array(2.0)\n data_format = \"NCHW\"\n res = np.array(\n [\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 1.0, 3.0, 2.0],\n [2.0, -3.0, 1.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ]\n ]\n ]\n )\n exp = paddle.nn.functional.pad(\n x=paddle.to_tensor(x), pad=pad, mode=mode, value=paddle.to_tensor(value), data_format=data_format\n )\n assert np.allclose(exp.numpy(), res)", "def test_pad7():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = (2, 2, 1, 1, 0, 0)\n mode = \"reflect\"\n data_format = \"NCDHW\"\n res = np.array(\n [\n [\n [\n [\n [6.0, 5.0, 4.0, 5.0, 6.0, 5.0, 4.0],\n [3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 1.0],\n [6.0, 5.0, 4.0, 5.0, 6.0, 5.0, 4.0],\n [3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 1.0],\n ]\n ]\n ]\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, data_format=data_format)", "def hook(module, input):\n image_dimensions = input[0].size()[-2:]\n module.padding = _determine_padding_from_tf_same(image_dimensions, kernel_size, stride)", "def _flatten_and_pad_to_world_size(self, tensor: torch.Tensor,\n world_size: int) -> torch.Tensor:\n if self._shard_param_on_dim_0:\n # shard only on dim 0 of the parameter, without flattening\n if tensor.size(0) % world_size != 0:\n pad_size = world_size - tensor.size(0) % world_size\n tensor = F.pad(tensor, [0, 0] * (tensor.dim() - 1) + [0, pad_size])\n return tensor\n\n tensor = tensor.flatten()\n if tensor.numel() % world_size != 0:\n pad_size = world_size - tensor.numel() % world_size\n tensor = F.pad(tensor, [0, pad_size])\n\n return tensor", "def unpadding(img, n):\n img = img[n:img.shape[0]-n, n:img.shape[1]-n]\n\n return img", "def offset_pad(self, offset):\n return (((offset + 3) / 4) * 4)", "def padding(self):\r\n return self._generate_spacing_info(self.config['padding'])", "def __UnPad(self, padded):\n pad = ord(padded[-1])\n return padded[:-pad]", "def pad(plain, size):\n offset = size - (len(plain) % size)\n return plain + chr(offset) * offset", "def pad_1d(x, pad_left, pad_right, mode='constant', value=0.):\n if (pad_left >= x.shape[-1]) or (pad_right >= x.shape[-1]):\n if mode == 'reflect':\n raise ValueError('Indefinite padding size (larger than tensor).')\n res = F.pad(x.unsqueeze(2),\n (pad_left, pad_right, 0, 0),\n mode=mode, value=value).squeeze(2)\n return res", "def depth(x):\n return max(int(x * depth_multiplier), 8)", "def _dynamic_padding(self, batch_data, pad_id = 0 ):\n #print 'dynamic _padding...'\n #print 'pad_id' + str(pad_id)\n max_p_len = 1000\n max_q_len =1000\n pad_p_len = min(max_p_len, max(batch_data['passage_length']))+1\n #print 'pad_p_len' + str(pad_p_len)\n pad_q_len = min(max_q_len, max(batch_data['question_length']))\n #print 'pad_q_len' + str(pad_q_len)\n #for ids in batch_data['passage_token_ids'] :\n #print 'padding: '\n #print (ids + [pad_id] * (pad_p_len - len(ids)))[: pad_p_len]\n batch_data['passage_token_ids'] = [(ids + [pad_id] * (pad_p_len - len(ids)))[: pad_p_len]\n for ids in batch_data['passage_token_ids']]\n batch_data['question_token_ids'] = [(ids + [pad_id] * (pad_q_len - len(ids)))[: pad_q_len]\n for ids in batch_data['question_token_ids']]\n return batch_data, pad_p_len, pad_q_len", "def additional_vertical_padding(self):\n return 0", "def resize_top(self, new_z, padding=None):\n self.upper_vertex[2] = new_z + padding" ]
[ "0.66942734", "0.6406283", "0.61821586", "0.6131536", "0.5901226", "0.5837195", "0.57383364", "0.56973714", "0.56866306", "0.56743133", "0.56252444", "0.551921", "0.55042475", "0.5423706", "0.5403531", "0.54034203", "0.5395218", "0.53895396", "0.538164", "0.5379684", "0.5374347", "0.53632003", "0.5352448", "0.53519785", "0.53474075", "0.5346369", "0.5335501", "0.53249127", "0.5321084", "0.53206474", "0.5310234", "0.5307841", "0.5306114", "0.5298229", "0.52857864", "0.5278424", "0.52783024", "0.5275957", "0.52566814", "0.5252171", "0.5247593", "0.5246216", "0.52344805", "0.5232526", "0.52241325", "0.5211646", "0.5208927", "0.52071536", "0.5203748", "0.5196518", "0.5193149", "0.51849294", "0.5183538", "0.5175097", "0.51416516", "0.5131655", "0.51270515", "0.5116308", "0.5111992", "0.5092556", "0.508808", "0.5059392", "0.505341", "0.50431514", "0.5039102", "0.5037776", "0.5028082", "0.50279486", "0.5026263", "0.50220954", "0.50197375", "0.5019531", "0.50115585", "0.5006594", "0.50055087", "0.49947786", "0.49944317", "0.49938324", "0.4987102", "0.49825245", "0.49736628", "0.4966936", "0.4966525", "0.4954446", "0.49459466", "0.4939793", "0.4930284", "0.4927737", "0.49271777", "0.4925482", "0.49226058", "0.4921198", "0.49199128", "0.49177018", "0.4913475", "0.4912964", "0.49053216", "0.49046582", "0.49017832", "0.49000168" ]
0.69494796
0
returns a normalized url to path relative from root
def relative_url(path, root): try: url = os.path.relpath(path, root) except: error('Unable to make a relative url:', url, root) url = url.replace('\\', '/') if os.sep == '\\' else url return urllib.parse.quote(url)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def full_url(self, path):\n if path[0] == '/':\n path = path[1:]\n return urljoin(self.absolute_root, path)", "def getRootURL():", "def relative_base(base):\n return as_base(base).lstrip('/')", "def _graceful_relative_url(base_url, url):\n if url == base_url:\n return ''\n base_prefix = '%s://%s' % urlparse.urlparse(base_url or '')[0:2]\n url_prefix = '%s://%s' % urlparse.urlparse(url or '')[0:2]\n if base_prefix == url_prefix and url_prefix != '://':\n return url[len(url_prefix):]\n return url", "def get_base_url(self):\n return urlparse.urljoin(self.domain, self.root_path)", "def full_url(self):\r\n\r\n url = '/' + '/'.join(p.slug for p in list(self.get_ancestors()) + [self] if p.slug)\r\n\r\n # Make sure the URL ends with a slash, as god intended.\r\n # This little endswith dance is done to handle the root url ('/') correctly.\r\n if not url.endswith('/'):\r\n url = url + '/'\r\n\r\n return url", "def _absurl(fragment):\r\n root = settings.MEDIA_URL\r\n root += root[-1:] != '/' and '/' or ''\r\n return urlparse.urljoin(root, fragment)", "def normalize_cdmi_url(self, path):\n # Turn URL path into OS path for manipulation\n mypath = url2pathname(path)\n if not os.path.isabs(mypath):\n mypath = os.path.join(url2pathname(self.pwd()), mypath)\n # normalize path\n mypath = os.path.normpath(mypath)\n if path.endswith(\"/\") and not mypath.endswith(\"/\"):\n mypath += \"/\"\n url = self.cdmi_url + pathname2url(mypath)\n return url", "def relative_uri(base, to):\n if to.startswith(SEP):\n return to\n b2 = base.split(SEP)\n t2 = to.split(SEP)\n # remove common segments (except the last segment)\n for x, y in zip(b2[:-1], t2[:-1]):\n if x != y:\n break\n b2.pop(0)\n t2.pop(0)\n if b2 == t2:\n # Special case: relative_uri('f/index.html','f/index.html')\n # returns '', not 'index.html'\n return ''\n if len(b2) == 1 and t2 == ['']:\n # Special case: relative_uri('f/index.html','f/') should\n # return './', not ''\n return '.' + SEP\n return ('..' 
+ SEP) * (len(b2)-1) + SEP.join(t2)", "def build_url(app, request):\n return '%s%s' % (app.url_root, request.path[1:])", "def build_url(app, request):\n return '%s%s' % (app.url_root, request.path[1:])", "def base_url_path(self):\n path = urlsplit(self.base_url())[2]\n if path.endswith(\"/\"):\n path = path[:-1]\n return path", "def urlpath(self, url):\n\t\t# remove schema + hostname\n\t\turl = re.sub('^[^:]*://[^/]+', '/', url)\n\n\t\treturn self.canonicalize(url)", "def get_relative_url(current, target):\n rel = os.path.relpath(target, current)\n\n if rel[-1] != \"/\":\n if \".\" not in rel.split(\"/\")[-1]:\n rel += \"/\"\n\n if not rel.startswith(\"../\") and rel != \"./\":\n rel = f\"./{rel}\"\n\n return rel", "def buildpath(self):\n basepath = urlutil.href_settings.root + (self.relpath if self.relpath else cherrypy.request.path_info)\n if basepath.find('~') < 0:\n basepath += ('' if basepath.endswith('/') else '/') + '~'\n if cherrypy.request.query_string:\n basepath += ('&' if basepath.find('?') >= 0 else '?') + cherrypy.request.query_string\n return basepath", "def get_path(self, normalize = False):\r\n\r\n split = self.path_s.split(\"?\", 1)\r\n path = split[0]\r\n if not normalize: return path\r\n if not path.startswith((\"http://\", \"https://\")): return path\r\n return netius.legacy.urlparse(path).path", "def _abs_path(rel_path):\n return os.path.join(BASE_DIR, rel_path)", "def clean_url(app_server, base_path) -> str:\n if app_server.endswith('/'):\n base_url = f\"{app_server[:-1]}{base_path}\"\n else:\n base_url = f\"{app_server}/{base_path}\"\n return base_url", "def _proper_url(self, url):\n if self.base_url not in url:\n url = self.base_url + url\n url = re.sub(r'(?<!https:)//', '/', url)\n if not url.endswith('/') and '?' not in url:\n url = url + '/'\n if url.endswith('?'):\n url = url[:-1]\n return url", "def absolute(self):\n if self.relative == '':\n return self.root # don't join in this case as that appends trailing '/'\n return os.path.join(self.root, self.relative)", "def get_short_url_base():", "def fix_url_path(url: str) -> str:\n return url if url.endswith(\"/\") else url + \"/\"", "def _absPath(self, relpath):\n\n # Pass through URIs and absolute paths.\n if self.isUrl(relpath) or relpath[0] == '/':\n return relpath\n\n # This won't deal with ~user/ syntax, but it's much less\n # common anyway.\n if relpath.startswith('~/') and 'HOME' in os.environ:\n return os.path.join(os.environ['HOME'], relpath[2:])\n\n if self._configFileStack:\n relativeTo = os.path.dirname(self._configFileStack[-1])\n else:\n relativeTo = os.getcwd()\n\n if self.isUrl(relativeTo):\n parts = urlparse.urlsplit(relativeTo)\n return urlparse.urlunsplit((parts.scheme, parts.netloc, os.path.normpath(os.path.join(parts.path, relpath)), parts.query, parts.fragment))\n return os.path.normpath(os.path.join(relativeTo, relpath))", "def normalize_url(url: str) -> str:\n parts = urlparse(url)\n\n path = quote(parts.path)\n while '//' in path:\n path = path.replace(\"//\", \"/\")\n\n return urlunparse(parts._replace(path=path))", "def _fullpath(self, path):\n splitpath = path.split(self._baseurl, 2)\n if len(splitpath) == 1:\n result = os.path.join(self._baseurl, path)\n else:\n result = path # path contains baseurl already\n return result", "def url_abs(name, *args):\n\tprotocol = settings.PROTOCOL\n\tdomain = settings.DOMAIN\n\turl = reverse(name, args=args)\n\tabs_path = '{}://{}{}'.format(protocol, domain, url)\n\t\n\treturn abs_path", "def fix_apiroot(root):\n if '://' in root:\n return root\n if 
('/' not in root) or ('.' not in root.split('/')[0]):\n root = \"www.pennapps.com/\" + root\n return \"http://%s\" % root", "def build_absolute_url(self, path_or_url):\n return urllib.parse.urljoin(self.parsed_url.geturl(), path_or_url)", "def resolve_url(url, redirects):\n s = url.find(':')\n if s < 0:\n return url\n scheme, rest = url[:s], url[s+1:]\n if scheme in redirects:\n root = redirects[scheme]\n elif scheme in REPO_ROOTS:\n root = REPO_ROOTS[scheme]\n else:\n return url\n root = root.rstrip('/')\n rest = rest.lstrip('/')\n return '/'.join([root, rest])", "def base_uri(relative_path=''):\n base_path = get_app_root()\n if not os.path.exists(base_path):\n raise ValueError('Path %s does not exist' % base_path)\n\n return 'file://%s' % os.path.join(base_path, relative_path)", "def url_root(self):\n return self.__get_option('url_root')", "def getFullURL(self, date):\n\n base = self.getBaseURL()\n path = self.getPath( date )\n return f'{base}/{path}'", "def _convert_path_to_url(path):\n\n project_normalized = os.path.abspath(PROJECT_ROOT)\n path_normalized = os.path.abspath(path)\n if not path_normalized.startswith(project_normalized):\n return None\n\n path_tail = path_normalized[len(project_normalized):]\n\n folders=[]\n while True:\n path_tail, folder=os.path.split(path_tail)\n if folder:\n if folder in EXCLUDED_DIRS or (EXCLUDE_HIDDEN and folder.startswith(\".\")):\n return None\n folders.append(urlparse.quote_plus(folder))\n else:\n if path_tail:\n folders.append(urlparse.quote_plus(path_tail))\n break\n folders.reverse()\n\n return \"/\".join(folders[1:])", "def full_uri(path):\n protocol = 'https' if settings.USE_HTTPS else 'http'\n domain = Site.objects.get_current().domain\n return \"{}://{}{}\".format(protocol, domain, path)", "def get_absolute_url(base_url: str, relative_url: str) -> str:\n\n absolute_url = relative_url\n\n if absolute_url.startswith('//'):\n absolute_url = absolute_url[2:]\n\n if absolute_url.startswith('/'):\n if base_url.endswith('/'):\n base_url = base_url[:-1]\n\n absolute_url = base_url + absolute_url\n\n return absolute_url", "def path_to_url(path):\r\n if os.sep == '/':\r\n return path\r\n else:\r\n return '/'.join(split_all(path))", "def normalize_upstream(path):\n if not path:\n return path\n if ':' not in path:\n return os.path.abspath(path)\n return path", "def normalize_url(url):\n # print(url)\n if not url.startswith('http://') and not url.startswith('https://'):\n return 'https://{}/{}'.format(zone_name, url.replace('//', '/'))\n return url", "def NormalizeLocation (uri, parent_uri=None, prefix_map=None):\n if uri is None:\n return uri\n if parent_uri is None:\n abs_uri = uri\n else:\n abs_uri = urlparse.urljoin(parent_uri, uri)\n if prefix_map is None:\n prefix_map = LocationPrefixRewriteMap_\n for (pfx, sub) in six.iteritems(prefix_map):\n if abs_uri.startswith(pfx):\n abs_uri = sub + abs_uri[len(pfx):]\n if 0 > abs_uri.find(':'):\n abs_uri = os.path.realpath(abs_uri)\n return abs_uri", "def url_slug(self):\n return self.module_path.split(\".\")[-1].lower() + \"/\"", "def full(self):\n url = (self.scheme + ':') if self.scheme else ''\n url += '//' + self.netloc + self.relative()\n return url", "def normalize(cls, target):\n parts = collections.deque()\n for p in target.split('/'):\n if (p == '' or p == '.') and len(parts) > 0:\n pass\n elif p == '..' 
and len(parts) > 0:\n parts.pop()\n else:\n parts.append(p)\n return '/'.join(parts)", "def get_path_relative_to_http_root(file_path):\n return os.path.relpath(file_path, get_http_path_prefix())", "def as_base(path):\n path = path if path.startswith('/') else '/' + path\n return path if path.endswith('/') else path + '/'", "def root_rel_path(self):\n return os.path.dirname(self.image.name)", "def _get_full_path(self, request):\n # get rid of the preceding /\n url = request.get_uri()[1:] if request.get_uri()[0] == \"/\" else \\\n request.get_uri()\n\n # if url is / change to index.html\n url = \"index.html\" if url == \"\" else url\n\n full_file_path = path.join(self._root, url)\n full_file_path = path.realpath(full_file_path)\n\n return full_file_path", "def rememberRootURL():", "def get_absolute_uri(self, uri):\n url_parts = urllib.parse.urlparse(uri)\n if url_parts.scheme not in urllib.parse.uses_relative \\\n or url_parts.path.startswith('/') \\\n or self.parser.base_uri is None:\n return uri\n return urllib.parse.urljoin(self.parser.base_uri, uri)", "def _relpath(self, path):\n\n # abandon query parameters\n path = path.split('?', 1)[0]\n path = path.split('#', 1)[0]\n path = os.path.normpath(unquote(path))\n words = path.split('/')\n words = filter(None, words)\n path = ''\n for word in words:\n drive, word = os.path.splitdrive(word)\n head, word = os.path.split(word)\n if word not in (os.curdir, os.pardir):\n path = os.path.join(path, word)\n\n return path", "def get_absolute_url(path):\n if is_absolute_url(path):\n return path\n site = settings.SITES['front']\n return build_url(path, scheme=site['scheme'], domain=site['domain'])", "def fix_url(url, root):\n if root in url:\n if validate_url(url):\n return url\n else:\n if not url.endswith('/'):\n if validate_url(url + '/'):\n return url + '/'\n if url.startswith('https://'):\n if validate_url(url[:4] + url[5:]):\n return url[:4] + url[5:]\n else:\n return None\n else:\n return None\n else:\n parsed = get_root_domain(url)\n if parsed == '':\n if url.startswith('/'): # '/link'\n if validate_url(root[:-1] + url):\n return root[:-1] + url\n else:\n return None\n else: # 'link'\n if url.startswith('./'): # '/link'\n if validate_url(root + url[2:]):\n return root[:-1] + url\n else:\n return None\n elif validate_url(root + url):\n return root + url\n else:\n return None\n else:\n return None", "def path_for(self, url, pagename):\n parts = pagename.split('/')[:-1]\n if len(parts) == 0:\n return url[1:]\n return os.path.relpath(url, '/%s' % '/'.join(parts))", "def _get_base_url(self):\n return '/{}/'.format(self.name.replace('__', '/'))", "def get_absolute_url(rel_url):\n protocol = \"http\" if settings.DEBUG else \"https\"\n domain = Site.objects.get_current().domain\n return f\"{protocol}://{domain}{rel_url}\"", "def get_normalized_url(url):\r\n scheme, netloc, path, params, query, fragment = urlparse(url)\r\n\r\n # Exclude default port numbers.\r\n if scheme == 'http' and netloc[-3:] == ':80':\r\n netloc = netloc[:-3]\r\n elif scheme == 'https' and netloc[-4:] == ':443':\r\n netloc = netloc[:-4]\r\n if scheme not in ('http', 'https'):\r\n raise ValueError(\"Unsupported URL %s (%s).\" % (url, scheme))\r\n\r\n # Normalized URL excludes params, query, and fragment.\r\n return urlunparse((scheme, netloc, path, None, None, None))", "def _relativize(base: str, current: str) -> str:\n if current.startswith(base):\n return current.replace(base, \"\", 1)\n return current", "def root_url(self) -> str:\n return self.root_hartree.har.root_url", "def 
translate_path(self, path):\r\n root_dir = self.server.config.get('root_dir')\r\n path = '{}{}'.format(root_dir, path)\r\n return path.split('?')[0]", "def normalize_url(link_url, page_url):\n # Strip off the file name from the current page's URL.\n page_path = os.path.dirname(page_url)\n # Join (concatenate) the current page's URL path to the new link.\n joined_url = os.path.join(page_path, link_url)\n # Normalize the resulting path (deal with relative folder references).\n normalized_url = os.path.normpath(joined_url)\n # Return the result, replacing backslashes with slashes.\n return normalized_url.replace(\"\\\\\", \"/\")", "def _normalized_path(path):\n return os.path.abspath(os.path.expanduser(path))", "def url(self):\n return (urljoin(self.lodgeit.address, self.relative_url)\n if self.relative_url else None)", "def rememberRootURL(self, url=None):\n if url is None:\n url = self.prePathURL()\n # remove one segment\n self.appRootURL = url[:url.rindex(\"/\")]\n else:\n self.appRootURL = url", "def get_correct_url(request: flask.Request) -> str:\n\n parsed_url = urlparse(request.url_root)\n request_scheme = request.headers.get('X-Scheme')\n if request_scheme is not None:\n # use the same scheme that the request used\n return parsed_url._replace(scheme=request_scheme).geturl()\n elif parsed_url.scheme == \"http\" and \"localhost\" not in parsed_url.netloc:\n # if the request scheme is unknown use https unless we're referring\n # to localhost\n return parsed_url._replace(scheme=\"https\").geturl()\n else:\n # give up and don't make any changes\n return request.url_root", "def normalize_url(self, url):\n pass", "def relative_path(root_dir, dirpath, f):\n full = os.path.join(dirpath, f)\n if not root_dir:\n return full\n if not full.startswith(root_dir):\n print(\"ERROR - bad path for root\", full)\n return None\n full = full[len(root_dir):]\n if full.startswith(\"/\"):\n return full[1:]\n return full", "def build_url(base_url, path):\n if absolute_http_url_regexp.match(path):\n return path\n elif base_url:\n return \"{}/{}\".format(base_url.rstrip(\"/\"), path.lstrip(\"/\"))\n else:\n raise exceptions.ParamsError(\"base url missed!\")", "def _convert_url_to_path(url):\n\n decoded_path = urlparse.unquote(url)\n segments = [s.strip() for s in decoded_path.split(\"/\")]\n for i in EXCLUDED_DIRS:\n if i in segments:\n return None\n if EXCLUDE_HIDDEN:\n for i in segments:\n if i.startswith('.'):\n return None\n\n return os.path.join(PROJECT_ROOT, *segments)", "def normalize(seed_url, link):\n link, _ = urldefrag(link) # remove hash to avoid duplicates\n return urljoin(seed_url, link)", "def book_rel_url_to_book_abs_url(relative_url):\n return \"https://books.toscrape.com/catalogue/\" + relative_url.removeprefix('../../../')", "def _urlnorm(self, uri):\r\n (scheme, authority, path, query, fragment) = parse_uri(uri)\r\n if not scheme or not authority:\r\n raise Exception(\"Only absolute URIs are allowed. uri = %s\" % uri)\r\n authority = authority.lower()\r\n scheme = scheme.lower()\r\n if not path:\r\n path = \"/\"\r\n\r\n # Could do syntax based normalization of the URI before\r\n # computing the digest. 
See Section 6.2.2 of Std 66.\r\n request_uri = query and \"?\".join([path, query]) or path\r\n scheme = scheme.lower()\r\n defrag_uri = scheme + \"://\" + authority + request_uri\r\n\r\n return defrag_uri", "def normalize_url(node):\n if not node:\n node = DEFAULT_NODE\n elif '://' not in node:\n node = '//{}'.format(node)\n parts = urlparse(node, scheme='http', allow_fragments=False)\n port = parts.port if parts.port else _get_default_port(parts.scheme)\n netloc = '{}:{}'.format(parts.hostname, port)\n return urlunparse((parts.scheme, netloc, parts.path, '', '', ''))", "def _absolute_root(path: _Path) -> str:\n path_ = Path(path)\n parent = path_.parent\n\n if path_.exists():\n return str(path_.resolve())\n else:\n return str(parent.resolve() / path_.name)", "def create_absolute_url(path: str) -> str:\n domain = settings.ALLOWED_HOSTS[0]\n return \"https://{domain}{path}\".format(domain=domain, path=path)", "def _GetRelPath(self, filename):\n assert filename.startswith(self.subdir), (filename, self.subdir)\n return filename[len(self.subdir):].lstrip(r\"\\/\")", "def _get_full_path(self, path, environ):\n if path.startswith('//'):\n path = path[1:]\n elif path.startswith('/'):\n path = environ.get('SCRIPT_NAME', '') + path\n return path", "def normalizePath(path):\n if path == None or len(path) == 0 or path == '/':\n return '/'\n buff = '/' + path if path[0] != '/' else path\n return buff.replace('//', '/')", "def _make_url(self, path):\n if not self.base_location:\n raise ValueError(\"No base_location set. Cannot construct url.\")\n\n if path:\n path = self._normalise_last_slashes(path)\n path = self._normalise_head_slashes(path)\n\n return \"\".join((self.base_location, self.endpoint, path))", "def _get_url(self, absolute):", "def buildRelativeURI(URI, base):\n ret = libxml2mod.xmlBuildRelativeURI(URI, base)\n return ret", "def normalize_url(url):\n parse = urlparse(url)\n\n # netloc should be lowercase\n netloc = parse.netloc.lower()\n if parse.scheme == \"http\":\n if netloc.endswith(\":80\"):\n netloc = netloc[:-3]\n\n elif parse.scheme == \"https\" and netloc.endswith(\":443\"):\n netloc = netloc[:-4]\n\n # add a '/' at the end of the netloc if there in no path\n if not parse.path:\n netloc = netloc + \"/\"\n\n return \"{}://{}{}\".format(parse.scheme, netloc, parse.path)", "def _get_file_url(path):\n return urlparse.urljoin(BASE_URL, path)", "def normalize_url(url):\n if not url.startswith((\"git+\", \"hg+\")):\n return url\n return url[4:]", "def relName(path, cwd=None, root=None):\n relRoot = os.path.normpath((root or projectRoot)) + os.sep\n cwd = os.path.abspath((cwd or os.getcwd())) + os.sep\n if path == cwd or path == cwd[:-1]:\n return \".\"\n\n if path.startswith(cwd):\n # The relative name is below the CWD, so we simply strip off the\n # leading parts.\n return path[len(cwd):]\n\n if path.startswith(relRoot) and cwd.startswith(relRoot):\n # The path is below the nominal root but parallel to the CWD. 
We need\n # to add some '../' parts.\n relToRootPath = path[len(relRoot):]\n relToRootCWD = cwd[len(relRoot):-1]\n count = 0\n while count < 1000 and relToRootCWD and relToRootCWD != os.sep:\n relToRootCWD, b = os.path.split(relToRootCWD)\n relToRootPath = \"..\" + os.sep + relToRootPath\n assert count < 1000\n return relToRootPath\n\n return path", "def get_absolute_url(self):\n\n url = reverse('comicsite.views.site', args=[self.short_name])\n return url", "def __url(self, *els):\n\n urls = [str(el) for el in els]\n urls.insert(0, self.BASE_URL)\n\n return '/'.join(s.strip('/') for s in urls)", "def _normalize_path(path):\n if path is None:\n directory = BASE_PATH\n path = ''\n else:\n path = op.normpath(path)\n directory = op.normpath(op.join(BASE_PATH, path))\n\n if not is_in_folder(BASE_PATH, directory):\n abort(404)\n\n if not op.exists(directory):\n abort(404)\n\n return BASE_PATH, directory, path", "def standard_path_from_server_root(self, arg: str) -> str:\n # Remove beginning and ending quotes\n arg = arg.strip(\"'\\\"\")\n\n # Standardize the include argument based on server root\n if not arg.startswith(\"/\"):\n # Normpath will condense ../\n arg = os.path.normpath(os.path.join(self.root, arg))\n else:\n arg = os.path.normpath(arg)\n return arg", "def get_full_url(self, part_url):\n return BASE_URL + part_url", "def format_ha_url(self, url):\n is_relative = not url.startswith(\"http\")\n if not is_relative:\n return url\n elif is_relative and self.ha_url is None:\n raise ValueError(\"ha_url must be specified when using relative url for photo_attribute.\") \n else:\n return urljoin(self.ha_url, url)", "def getRootURL(self):\n return self.appRootURL", "def _get_full_url(self, link, url):\n from webcrawler.settings import process_link_value\n path = urlparse.urljoin(url, link)\n path = process_link_value(path)\n return path", "def translate(self, uri_path):\n _parts = [self.root] + uri_path.lstrip('/').split('/')\n fs_path = os.sep.join(_parts)\n fs_path = os.path.realpath(fs_path)\n return fs_path", "def rel_resolve(path):\n if os.path.isabs(path):\n return os.path.abspath(path)\n else:\n return os.path.join(SCRIPTDIR, path)", "def get_full_url(request_handler, path):\n pr = urlparse(request_handler.request.url)\n return '%s://%s%s' % (pr.scheme, pr.netloc, path)", "def relatif (path, root = None):\n\tfrom os import sep, getcwd\n\tpath = normalizePath(path)\n\tif root != None:\n\t\troot =normalizePath(root)\n\t# If the path is empty\n\tif len(path) == 0:\n\t\treturn \"\"\n\n\t# If the root is not defined\n\tif root == None:\n\t\t# Take the current directory\n\t\troot = getcwd()\n\t\t\n\t# Cut paths to directory\n\tif path[-1] == sep:\n\t\tpath = path[:-1]\n\tspPath = path.split(sep)\n\tspRoot = root.split(sep)\n\n\t# Constructs the list of the identical path\n\tequal = []\n\tfor i in range(0,mini(len(spRoot),len(spPath))):\n\t\tif spRoot[i] != spPath[i]:\n\t\t\tbreak\n\t\telse:\n\t\t\tequal.append(spPath[i])\n\n\t# If the identical list is not empty\n\tif len(equal) != 0:\n\t\t# Remove identical paths \n\t\tspRoot = spRoot[len(equal):]\n\t\tspPath = spPath[len(equal):]\n\t\t\n\t\t# Add an indirection\n\t\tfor i in range(len(spRoot)):\n\t\t\tspPath.insert(0,\"..\")\n\n\t# Constructs the relative path\n\tresult = \"\"\n\tfor i in spPath:\n\t\tresult += i + sep\n\n\tif result != \"\":\n\t\treturn result[:-1]\n\telse:\n\t\treturn \"\"", "def relpath(d1, d2):\n assert d1.startswith(d2)\n return d1[len(d2):].lstrip('/')", "def normalize_base_dir(base_dir: Optional[str]) -> str:\n if 
base_dir is None:\n base_dir = os.path.abspath(\"\")\n elif not is_absolute_path(base_dir):\n base_dir = os.path.abspath(base_dir)\n while base_dir != '/' and base_dir.endswith('/'):\n base_dir = base_dir[:-1]\n return base_dir", "def root(*args):\n return join(abspath(dirname(__file__)), *args)", "def _format_url(s):\n return u'%s%s\\n' % (BASE_URL, s.get_absolute_url())", "def build_image_path(self, src):\r\n o = urlparse(src)\r\n # we have a full url\r\n if o.hostname:\r\n return o.geturl()\r\n # we have a relative url\r\n return urljoin(self.target_url, src)" ]
[ "0.75811696", "0.7120959", "0.7007499", "0.6957676", "0.6937742", "0.6913981", "0.6850913", "0.67784756", "0.6737724", "0.67350227", "0.67350227", "0.6698321", "0.6689216", "0.6677582", "0.6671769", "0.6658958", "0.66039646", "0.6588936", "0.65848976", "0.65747464", "0.65624976", "0.65560746", "0.6540153", "0.6529589", "0.6506639", "0.64965904", "0.64825475", "0.6440993", "0.64391786", "0.6414008", "0.64008963", "0.6396806", "0.63948256", "0.6379096", "0.6372548", "0.63491225", "0.63359916", "0.6333186", "0.6322359", "0.63223565", "0.63198954", "0.6313786", "0.6311276", "0.6309267", "0.63083106", "0.6306435", "0.6304921", "0.63002986", "0.62857884", "0.6285526", "0.62818193", "0.62717044", "0.6258886", "0.6246483", "0.6204633", "0.6204262", "0.61963314", "0.61922073", "0.6185197", "0.61697644", "0.6169406", "0.6168088", "0.6165018", "0.61642975", "0.61566323", "0.61542124", "0.61393607", "0.61325586", "0.61247224", "0.6115374", "0.61123025", "0.6108789", "0.6106661", "0.61064255", "0.61045235", "0.61013687", "0.6099683", "0.6099285", "0.60546935", "0.60510457", "0.60448945", "0.60414934", "0.60401183", "0.60218984", "0.6012408", "0.60049397", "0.60037595", "0.6000054", "0.599092", "0.59811246", "0.5981091", "0.5979042", "0.5965888", "0.596547", "0.59648514", "0.5964425", "0.59631974", "0.59552157", "0.59504575", "0.5943516" ]
0.7165835
1
Generate Post objects from markdown. Date must be present in each post and posts must be ordered by date.
def parse_markdown(filename): if not os.path.exists(filename): error('File not found', filename) posts = list() with open(filename, encoding='utf-8') as f: line = next(f) if line.startswith('# '): title = line[2:].strip() record = [] next(f) else: title = None record = [line] for line in f: if not line.startswith('___'): record.append(line) else: posts.append(Post.from_markdown(record)) record = [] # set rank of posts in date daterank = defaultdict(int) for post in posts: daterank[post.date] += 1 post.daterank = daterank[post.date] # check post order for post1, post2 in zip(posts[:-1], posts[1:]): if post1.date > post2.date: error('Posts are not ordered', f'{post1.date} > {post2.date}') return title, posts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def markdown_post(post):\n post['entry'] = markdown(post['entry'].replace(\"\\n\",\" \\n\"), output=\"html5\")\n return post", "def parse_post_text(formatted_content):\n post = {}\n # Parse Mod comments and remove them from the text.\n potential_comments = re.finditer(\"\\[.+?\\]\", formatted_content, re.DOTALL)\n comments = []\n for comment_match in potential_comments:\n comment = comment_match.group()\n mod = re.search(r\"\\-\\s?Mod\\.\\s?(?P<mod>\\w+\\b)\", comment)\n if mod:\n comments.append({\n \"comment\" : comment,\n \"mod\" : mod.group(\"mod\")\n })\n post[\"modComments\"] = comments\n \n # Comments are removed from the post test so that\n # links, reports, etc. mentioned by mods are not extracted.\n no_comment_txt = formatted_content\n for comment in comments:\n no_comment_txt = no_comment_txt.replace(comment[\"comment\"], \"\")\n \n metadata, header_end = parse_post_metadata(no_comment_txt)\n post.update(metadata)\n \n sections = re.split(r\"^[\\*#]{3,}\\s*$\", no_comment_txt[header_end:], flags=re.M)\n articles = []\n \n # Some posts have articles which are parsed into multiple sections:\n # Ex: http://www.promedmail.org/direct.php?id=2194235\n # The section parsing code tries to recombine these by concatenating\n # unrecognized sections onto the previous sections if they form an article.\n # article_start_idx keeps track of the first section in the article.\n article_start_idx = None\n \n for idx, section in enumerate(sections):\n section = section.strip()\n article = parse_article_text(section, post_date=post['promedDate'])\n # Check if the section contains an actual article by seeing which\n # properties could be parsed.\n if article.get('source') or article.get('date'):\n articles.append(article)\n article_start_idx = idx\n else:\n # When a section cannot be parsed as an article the following code\n # tries to determine what it is. If the type cannot be determined\n # an error or warning is thrown.\n # These warnings can be used to find sections which are not being\n # correctly parsed.\n # Posts with known issues:\n # http://www.promedmail.org/direct.php?id=19990512.0773\n if re.search(r\"Visit ProMED-mail\\'s web site at|\"\n r\"Please support (the \\d{4}\\s)?ProMED\\-mail|\"\n r\"Donate to ProMED\\-mail. 
Details available at|\"\n r\"ProMED\\-mail makes every effort to verify the reports|\"\n r\"PROMED\\-MAIL FREQUENTLY ASKED QUESTIONS|\"\n r\"Become a ProMED\\-mail Premium Subscriber|\"\n r\"A ProMED\\-mail post\",\n section, re.I):\n # boilerplate promed notice section\n pass\n elif re.search(r\"In this (update|post(ing)?)\", section):\n # table of contents section\n pass\n elif re.search(r\"Cases in various countries\", section):\n # This type of post typically has links to several articles\n # with single sentence summaries.\n # Ex: http://www.promedmail.org/direct.php?id=20131125.2073661\n pass\n elif section == \"\":\n # empty section\n pass\n elif idx == 0 and section.count(\"\\n\") < 2:\n # probably the article title\n pass\n else:\n if article_start_idx != None:\n article = parse_article_text(\n \"\\n#####\\n\".join(\n sections[article_start_idx:idx]).strip(),\n post_date=post['promedDate'])\n assert article.get('source') or article.get('date')\n articles[-1] = article\n continue\n else:\n print \"Unexpected Section (%s):\" % post['archiveNumber'], [section[0:50] + \"...\"]\n article_start_idx = None\n post['articles'] = articles\n return post", "def rebuild_from_yaml(args):\n\n git_checkout_branch('gh-pages')\n\n posts = []\n for fname in glob('_posts/*.html'):\n with codecs.open(fname, 'r', 'utf-8') as f:\n c = f.read()\n # we only want the yaml frontmatter\n start = c.index('---') + 3\n end = c.rindex('---')\n frontmatter = yaml.safe_load(c[start:end])\n\n posts.append(Post(**frontmatter['api_data']['post']))\n\n _write_out(posts, yaml=False, supporting=True)", "def parse_article_text(article_text, post_date=datetime.datetime.now()):\n result = {}\n\n metadata_start = 0\n main_content_start = 0\n main_content_end = len(article_text)\n \n article_date_match = re.search(r\"^Date:\\s(?P<date>[^\\(\\[\\n]+)\", article_text, re.M)\n if article_date_match:\n # There may be more than one source date in summary articles.\n # Example: http://promedmail.org/direct.php?id=1073176\n # Summary articles are not a focus so currently only the first date\n # is recorded.\n source_date = parse_datetime(\n article_date_match.group(\"date\")\n )\n\n if source_date:\n result[\"date\"] = datetime_to_utc(source_date)\n metadata_start = min(article_date_match.start(), metadata_start)\n main_content_start = max(article_date_match.end(), main_content_start)\n # The year is checked to avoid typos like 200_ that throw\n # the date off by a large factor.\n # Example: http://www.promedmail.org/direct.php?id=45850 (article 2)\n if result[\"date\"].year < 1900:\n result[\"date\"] = None\n # Some articles have timestamps that are incorrectly parsed.\n # Current examples:\n # http://www.promedmail.org/direct.php?id=43918\n # http://www.promedmail.org/direct.php?id=2200173\n # Some of these incorrect timestamps can be removed by verifying that\n # they preceed the time of the posting. 
A day of slop time is allowed\n # to account for variations due to incorrect timezones.\n elif result[\"date\"] > post_date + datetime.timedelta(1):\n result[\"date\"] = None\n else:\n result[\"date\"] = None\n \n source_match = re.search(r\"Source:\\s(?P<name>[^\\[\\n]+)\" +\\\n r\"(\\s(?P<edits>\\[.*))?\" +\\\n r\"\\n\" +\\\n r\"(?P<url>http.+)?\", article_text)\n if source_match:\n result[\"source\"] = source_match.groupdict()\n metadata_start = min(source_match.start(), metadata_start)\n main_content_start = max(source_match.end(), main_content_start)\n \n heading_match = re.search(r\"^(?P<idx>\\[\\d\\]\\s)?\" +\\\n r\"(?P<heading>\\S+.*)\\n\",\n article_text[0:metadata_start], re.M)\n if heading_match:\n result[\"heading\"] = heading_match.group(\"heading\")\n \n communicated_match = re.search(communicated_by_regex, article_text, re.M)\n if communicated_match:\n result[\"communicatedBy\"] = communicated_match.group(\"communicated_by\")\n main_content_end = min(communicated_match.start(), main_content_end)\n \n result[\"content\"] = article_text[main_content_start:main_content_end].strip()\n return result", "def get_contents(\n self, post_ids: List[str], datetime_filter_fn: Optional[Callable[[datetime], bool]] = None\n ) -> List[str]:\n contents = []\n url = f\"http://blog.naver.com/PostView.nhn\"\n params = {\"blogId\": self.naver_id}\n for post_id in post_ids:\n params[\"logNo\"] = post_id\n\n # Get contents of a post\n response = self.session.get(url, params=params)\n\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n # Smart editor 3\n text = soup.select_one(f\"#post-view{post_id} > div > div > div.se-main-container\")\n # Smart editor 2\n if not text:\n text = soup.select_one(\n f\"#post-view{post_id} > div > div > div.se_component_wrap.sect_dsc.__se_component_area\"\n )\n\n if not text:\n text = soup.select_one(f\"#post-view{post_id}\")\n if text:\n text = text.get_text(\"\\n\").replace(\"\\xa0\", \" \") # Space unicode replace\n else:\n print(f\"[Error] cannot select content in {post_id}.\", file=sys.stderr)\n continue\n\n text = re.sub(\"\\s+\", \" \", text).strip()\n if datetime_filter_fn is None:\n contents.append(text)\n continue\n\n date_time = soup.select(\n f\"#post-view{post_id} > div > div > div > div > div > div.blog2_container > span.se_publishDate.pcol2\"\n )\n date_time += soup.select(\"#printPost1 > tr > td.bcc > table > tr > td > p.date.fil5\")\n\n if date_time:\n date_time = date_time[0].get_text()\n post_datetime = datetime.strptime(date_time, \"%Y. %m. %d. 
%H:%M\")\n if not datetime_filter_fn(post_datetime):\n continue\n else:\n print(f\"[Error] cannot select datetime in {post_id}, this post is not filtered\")\n\n contents.append(text)\n\n print(f\"Get contents: {len(contents)} found.\")\n return contents", "def parse_post_metadata(post_text):\n result = {}\n \n header_end = 0\n \n promed_date_match = re.search(\n r\"Published Date:\\s(?P<date>.*)\", post_text)\n result[\"promedDate\"] = parse_promed_pub_datetime(\n promed_date_match.group(\"date\"))\n \n archive_match = re.search(r\"Archive Number: (?P<num>.*)\", post_text)\n result[\"archiveNumber\"] = archive_match.group(\"num\")\n header_end = archive_match.end()\n \n subject = re.search(r\"Subject:\\s(?P<subject>.*)\", post_text).group(\"subject\")\n result[\"subject\"] = parse_subject_line(subject)\n result[\"subject\"][\"raw\"] = subject\n \n # This will not find all linked reports.\n # Some older posts refrence posts using different indexes I do not know\n # how to interpret.\n # Example: http://promedmail.org/direct.php?id=2194235\n result[\"linkedReports\"] = [\n report_id for report_id in re.findall(r\"\\d{8}\\.\\d+\", post_text)]\n \n # Most links will be article source urls or links to promed.\n result[\"links\"] = list(set(\n re.findall(r\"http\\S+[^(\\.\\])(\\.\\)>\\s]\", post_text)))\n result[\"links\"].sort()\n \n communicated_match = re.search(communicated_by_regex, post_text, re.M)\n if communicated_match:\n result[\"communicatedBy\"] = communicated_match.group(\"communicated_by\")\n return result, header_end", "def _parse_markdown(self):\n renderer = MyRenderer()\n md = mistune.Markdown(renderer=renderer)\n md.render(self._markdown_text)\n self._bash_commands = renderer._bash_commands", "def markdown(text, *args, **kwargs):\n md = StMarkdown(*args, **kwargs)\n return md.convert(text)", "def parse(text):\n md = markdown.Markdown(['codehilite', 'tables', ])\n\n for iref in re.findall(img_ref_re, text):\n img_id = iref[7]\n try:\n image = FlatPageImage.objects.get(pk=int(img_id))\n md.references[img_id] = (image.image_path.url, '')\n except ObjectDoesNotExist:\n pass\n\n for lref in re.findall(reference_re, text):\n doc_name = lref[7]\n try:\n doc = File.objects.get(name=doc_name)\n md.references[doc_name]= (doc.url, doc.name)\n except ObjectDoesNotExist:\n pass\n\n return md.convert(text)", "def markdown(self, text):\n\n try:\n html = markdown.markdown(text)\n data = self.convert_content(html)\n return data\n except Exception as e:\n Utils.log(traceback.format_exc())\n Utils.send('markdown.error')\n Utils.error(e.args[0])", "def _populate_posts(self, channel, url):\n import feedparser\n\n Post = get_model('articles', 'Post')\n Image = get_model('images', 'Image')\n\n parser = feedparser.parse(url)\n\n for entry in parser['entries']:\n # Some entries are incomplete and have only the title, need to\n # ignore these entries.\n if not entry.get('summary'):\n continue\n\n # The title may have only 140 characters\n title = self._truncate_string(entry['title'], 140)\n slug = slugify(title)\n headline = entry['summary']\n\n # Some entries do not have the 'content' field, in this case we\n # get the 'summary' field instead.\n if entry.get('content'):\n content = entry['content'][0]['value']\n else:\n content = entry['summary']\n\n # When we find a entry that already is registered we don't need\n # continue because the following registries already be registered.\n exists = Post.objects.filter(slug=slug).count()\n if exists:\n break\n\n # Check if has some image in the post content.\n 
# NOTE: For the best user experience we use only the posts that\n # have images.\n image_url = self._get_image_url_in_content(content)\n if image_url:\n main_image = Image.objects.create(\n title=title,\n slug=slug,\n archive_link=image_url,\n published=True,\n user=self._user\n )\n # Generate the 'short_title' based on 'content'\n short_title = re.sub('<[^<]+?>', '', content).encode('utf-8')\n short_title = self._truncate_string(short_title.strip(), 140)\n\n post = Post.objects.create(\n title=title,\n short_title=short_title,\n slug=slug,\n headline=headline,\n content=content,\n channel=channel,\n main_image=main_image,\n show_on_root_channel=True,\n published=True,\n hat='',\n user=self._user\n )", "def process_md(text_md):\n\tprocessed_text_md = ( pre_proc.replace_br(text_md)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.remove_false_titles)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.remove_blank_lines)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.replace_cid)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.replace_with_dash)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_by_hyphen)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_lines)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_lines)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_et_al)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_beta)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_vs)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.fix_enye)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_ellipsis)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_subtraction)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_by_colon)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.remove_duplicated_dashes)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.fix_marks)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_title_questions)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.remove_useless_lines)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.remove_duplicated_whitespaces)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.remove_repeated_strings)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t)\n\treturn processed_text_md", "def post_content(context, is_markdown=False, *args, **kwargs):\n obj = context['object']\n content = obj.parse_content()\n\n if not content:\n content = obj.content_rendered\n\n default_template = [\"djblog/includes/post_content.html\"]\n\n if obj.custom_template:\n tpl = Template(obj.custom_template)\n\n #elif obj.template_name:\n # default_template.append(obj.template_name)\n # tpl = loader.select_template(default_template)\n\n else:\n tpl = loader.select_template(default_template)\n\n custom_context = Context({\n 'content': mark_safe(markdown(content))\n })\n\n custom_context.update(context)\n\n return mark_safe(tpl.render(custom_context))", "def format_posts(posts):\n formatted_posts = []\n\n for post in posts:\n post_data = post['data']\n formatted_post = {\n \"title\": post_data['title'],\n \"post_id\": post_data['id'],\n \"subreddit\": post_data['subreddit'],\n \"score\": post_data['score'],\n \"url\": post_data['url'],\n \"author\": post_data['author'],\n \"permalink\": format_post_permalink(post_data['permalink']),\n \"num_comments\": post_data['num_comments'],\n \"created\": post_data['created'],\n \"body\": post_data['selftext']\n }\n\n formatted_posts.append(formatted_post)\n\n return formatted_posts", "def archive_parse_for_posts(page_html):\n # <div\\s+class=\"post.+data\\-post\\-id\\=['\"](\\d+)['\"].+?<span\\s+class=['\"]post_date['\"]>([^<]+)</span>\n post_info_regex = 
\"\"\"<div\\s+class=\"post.+?data\\-post\\-id\\=['\"](\\d+)['\"].+?<span\\s+class=['\"]post_date['\"]>([^<]+)</span>\"\"\"\n post_info = re.findall(post_info_regex, page_html, re.IGNORECASE|re.DOTALL)\n return post_info", "def parse_posts(self):\n logger.info(\"Parsing posts\")\n\n self.df.title = self.df.title.str.strip()\n\n spam_companies = [\"Indeed Prime\"]\n self.df = self.df[~self.df[\"company\"].isin(spam_companies)]\n self.df = self.df.dropna(subset=[\"company\"])\n self.df = self.df.drop_duplicates(subset=[\"company\", \"date_posted\", \"title\"])", "def generate_post(self):\n post = {'title': self.generate_title(), 'draft': False}\n for k in ('blog', 'id', 'labels', 'categories', 'draft'):\n if k not in self.header:\n continue\n if k == 'blog':\n post[k] = {'id': self.header[k]}\n else:\n post[k] = self.header[k]\n return post", "def add_new_posts(last_updated=None):\n for blog in Blog.objects.all():\n try:\n document = feedparser.parse(blog.feed_url)\n except:\n print \"error parsing\"\n continue\n\n if last_updated is None:\n print(\"- Adding %i articles from %s\" % (len(document['entries']), blog.title))\n\n for entry in document['entries']:\n # now we create a new post\n post = Post()\n post.blog = blog\n post.title = entry['title']\n\n if 'summary' in entry:\n post.content = entry['summary']\n if 'content' in entry:\n post.content = entry['content']\n\n post.link = entry['link']\n post.save()\n else:\n # TODO: only parse from a date\n pass", "def parse():\n G.go(SITE_URL)\n articles = []\n for article in G.doc.select(\"//li[@class='regularitem']\"):\n header = article.select('h4').text()\n text = article.select('div').text()\n url = article.select('h4/a/@href').text()\n dt_string = article.select('h5').text()\n # for date format \"1 Nov 2019 00:00:00\" or \"01 Nov 2019 00:00:00\"\n article_dt = re.search(r'\\d{1,2} [a-zA-Z]+ \\d{4} \\d{2}:\\d{2}:\\d{2}', dt_string)\n if article_dt is None:\n logging.exception('Datestring format is unknown: %s', dt_string)\n continue\n article_dt = article_dt.group(0)\n article_dt = datetime.datetime.strptime(article_dt, '%d %b %Y %H:%M:%S').strftime(\"%Y-%m-%d %H:%M:%S\")\n articles.append({'header': header, 'url': url, 'text': text, 'dt': article_dt})\n return articles", "def markdown(text):\n text = gfm(text)\n text = markdown_lib.markdown(text)\n return text", "def parse_post_content(self, response):\n post = Post()\n post['title'] = response.xpath('//h2/a/text()')[0].extract()\n post['image_url'] = response.xpath(\"//div[@class='cont group']//img/@src\")[0].extract()\n yield post", "def convert(self, markdown: str) -> str:\n lines = markdown.split(NEWLINE)\n iterator = LineIterator(lines)\n\n while not iterator.is_done():\n for element in self.__elements:\n if element.is_relevant(iterator.value):\n element.replace(iterator)\n iterator.advance()\n return NEWLINE.join(iterator.lines)", "def create_posts_df(post_filenames):\n posts_list = []\n n = 1\n for post in post_filenames:\n try:\n processed_post = xmlpost_to_dict(post)\n posts_list.append(processed_post)\n except AttributeError:\n print('Error parsing post:', post)\n n += 1\n\n print(\"Posts with trouble parsing (possibly missing messages):\" + str(n))\n df = pd.DataFrame(posts_list)\n df.post_time = pd.to_datetime(df.post_time)\n df.last_edit_time = pd.to_datetime(df.last_edit_time)\n # df.set_index(['post_id'])\n\n return df", "def main(blog, date):\n template = front_matter({\n \"title\": blog,\n \"date\": get_date(\"%Y-%m-%d %H:%M:%S %z\"),\n })\n new_blog(date + '-' + blog + 
'.markdown', template)", "def markdown(text, **kwargs):\n import markdown\n return markdown.markdown(text, **kwargs)", "def convert_to_markdown(lines):\n # description = get_description(lines)\n blocks = get_blocks(lines)\n out = []\n for block in blocks:\n item = align_block(block)\n item = format_headings(item)\n item = format_lists(item)\n item = format_numb_list(item)\n out.append(item)\n return join_blocks(out)", "def process_posts(app, doctree):\n env = app.builder.env\n if not hasattr(env, \"ablog_posts\"):\n env.ablog_posts = {}\n post_nodes = list(doctree.findall(PostNode))\n if not post_nodes:\n return\n post_date_format = app.config[\"post_date_format\"]\n should_auto_orphan = app.config[\"post_auto_orphan\"]\n docname = env.docname\n if should_auto_orphan:\n # mark the post as 'orphan' so that\n # \"document isn't included in any toctree\" warning is not issued\n # We do not simply assign to should_auto_orphan because if auto-orphan\n # is false, we still want to respect the per-post :rst:dir`orphan` setting\n app.env.metadata[docname][\"orphan\"] = True\n blog = Blog(app)\n auto_excerpt = blog.post_auto_excerpt\n multi_post = len(post_nodes) > 1 or blog.post_always_section\n for order, node in enumerate(post_nodes, start=1):\n if node[\"excerpt\"] is None:\n node[\"excerpt\"] = auto_excerpt\n if multi_post:\n # section title, and first few paragraphs of the section of post\n # are used when there are more than 1 posts\n section = node\n while True:\n if isinstance(section, nodes.section):\n break\n section = node.parent\n else:\n section = doctree\n # get updates here, in the section that post belongs to\n # Might there be orphan updates?\n update_dates = _get_update_dates(section, docname, post_date_format)\n # Making sure that post has a title because all post titles\n # are needed when resolving post lists in documents\n title = node[\"title\"] or _get_section_title(section)\n # creating a summary here, before references are resolved\n excerpt = []\n if node.children:\n if node[\"exclude\"]:\n node.replace_self([])\n else:\n node.replace_self(node.children)\n for child in node.children:\n excerpt.append(child.deepcopy())\n elif node[\"excerpt\"]:\n count = 0\n for nod in section.findall(nodes.paragraph):\n excerpt.append(nod.deepcopy())\n count += 1\n if count >= (node[\"excerpt\"] or 0):\n break\n node.replace_self([])\n else:\n node.replace_self([])\n nimg = node[\"image\"] or blog.post_auto_image\n if nimg:\n for img, nod in enumerate(section.findall(nodes.image), start=1):\n if img == nimg:\n excerpt.append(nod.deepcopy())\n break\n date = node[\"date\"]\n if date:\n try:\n date = datetime.strptime(date, post_date_format)\n except ValueError:\n if date_parser:\n try:\n date = date_parser(date)\n except ValueError:\n raise ValueError(\"invalid post date in: \" + docname)\n else:\n raise ValueError(\n f\"invalid post date ({date}) in \" + docname + f\". 
Expected format: {post_date_format}\"\n )\n else:\n date = None\n # if docname ends with `index` use folder name to reference the document\n # a potential problem here is that there may be files/folders with the\n # same name, so issuing a warning when that's the case may be a good idea\n folder, label = os.path.split(docname)\n if label == \"index\":\n folder, label = os.path.split(folder)\n if not label:\n label = slugify(title)\n section_name = \"\"\n if multi_post and section.parent is not doctree:\n section_name = section.attributes[\"ids\"][0]\n label += \"-\" + section_name\n else:\n # create a reference for the post\n # if it is posting the document\n # ! this does not work for sections\n app.env.domains[\"std\"].data[\"labels\"][label] = (docname, label, title)\n app.env.domains[\"std\"].data[\"anonlabels\"][label] = (docname, label)\n if section.parent is doctree:\n section_copy = section[0].deepcopy()\n else:\n section_copy = section.deepcopy()\n # multiple posting may result having post nodes\n for nn in section_copy.findall(PostNode):\n if nn[\"exclude\"]:\n nn.replace_self([])\n else:\n nn.replace_self(node.children)\n postinfo = {\n \"docname\": docname,\n \"section\": section_name,\n \"order\": order,\n \"date\": date,\n \"update\": max(update_dates + [date]),\n \"title\": title,\n \"excerpt\": excerpt,\n \"tags\": node[\"tags\"],\n \"author\": node[\"author\"],\n \"category\": node[\"category\"],\n \"location\": node[\"location\"],\n \"language\": node[\"language\"],\n \"redirect\": node[\"redirect\"],\n \"nocomments\": node[\"nocomments\"],\n \"image\": node[\"image\"],\n \"exclude\": node[\"exclude\"],\n \"external_link\": node[\"external_link\"],\n \"doctree\": section_copy,\n }\n if docname not in env.ablog_posts:\n env.ablog_posts[docname] = []\n env.ablog_posts[docname].append(postinfo)\n # instantiate catalogs and collections here\n # so that references are created and no warnings are issued\n if app.builder.format == \"html\":\n stdlabel = env.domains[\"std\"].data[\"labels\"] # NOQA\n else:\n if hasattr(env, \"intersphinx_inventory\"):\n stdlabel = env.intersphinx_inventory.setdefault(\"std:label\", {}) # NOQA\n baseurl = getattr(env.config, \"blog_baseurl\").rstrip(\"/\") + \"/\" # NOQA\n project, version = env.config.project, str(env.config.version) # NOQA\n for key in [\"tags\", \"author\", \"category\", \"location\", \"language\"]:\n catalog = blog.catalogs[key]\n for label in postinfo[key]:\n coll = catalog[label] # NOQA\n if postinfo[\"date\"]:\n coll = blog.archive[postinfo[\"date\"].year] # NOQA", "def postCreate(post):\n post_list = list()\n comments = commentList(post)\n comment_url = \"{}/api/posts/{}/comments\".format(DOMAIN, post.id)\n visible_to = list()\n visible = post.visibleTo.all()\n if visible:\n for author in visible:\n auth = \"{}/api/author/{}\".format(DOMAIN, author.id)\n visible_to.append(auth)\n\n # visible_to = list(post.visibleTo)\n post_dict = {'author': addAuthor(post.author), 'title': post.title, 'description': post.description,\n 'contentType': post.contentType, 'content': post.content, 'published': post.published,\n 'visibility': post.visibility, 'visibleTo': visible_to, 'unlisted': post.unlisted, 'id': post.id,\n 'comments': comments[:5], 'next': comment_url, 'count': len(comments),\n 'source': \"{}/api/posts/{}\".format(DOMAIN, post.id),\n 'origin': \"{}/api/posts/{}\".format(DOMAIN, post.id)}\n post_list.append(post_dict)\n return post_list", "def posts_as_schemas(posts_from_vk: list[dict]) -> list[Post]:\n posts = []\n\n for 
post_from_vk in posts_from_vk:\n try:\n post = Post(\n date=post_from_vk[\"date\"],\n likes=post_from_vk[\"likes\"][\"count\"],\n text=post_from_vk[\"text\"],\n path=f\"wall{post_from_vk['owner_id']}_\" f\"{post_from_vk['id']}\",\n photos=[],\n videos=[],\n )\n except KeyError as exc:\n logger.error(\"No key %s for post: %s\", exc, post_from_vk)\n continue\n\n # Collect attachments (photos, videos etc.).\n if \"attachments\" in post_from_vk:\n attachments = post_from_vk[\"attachments\"]\n for attachment in attachments:\n if attachment[\"type\"] == \"photo\":\n try:\n photo = PostPhoto(url=\"\")\n photo.url = attachment[\"photo\"][\"sizes\"][-1][\"url\"]\n post.photos.append(photo)\n except KeyError as exc:\n logger.error(\"No key %s for photo: %s\", exc, post_from_vk)\n\n elif attachment[\"type\"] == \"video\":\n video = PostVideo(first_frame_url=\"\")\n video_from_vk = attachment[\"video\"]\n if \"first_frame\" in video_from_vk:\n video.first_frame_url = video_from_vk[\"first_frame\"][-1][\"url\"]\n elif \"image\" in video_from_vk:\n video.first_frame_url = video_from_vk[\"image\"][-1][\"url\"]\n else:\n logger.error(\"No video image found: %s\", post)\n continue\n post.videos.append(video)\n\n posts.append(post)\n\n return posts", "def parse_article(self, response):\n\n raw_post = response.css(\"div.blog.post > div.inner > div.row > article\")\n\n post_loader = ItemLoader(item=BlogPostItem(), selector=raw_post)\n post_loader.default_output_processor = TakeFirst()\n\n post_title = raw_post.css(\"div#postcontent > h1::text\").extract_first()\n post_loader.add_value(\"title\", post_title)\n post_loader.add_value(\"url\", response.request.url)\n\n post_text_selector = raw_post.css(\"div#postcontent > div#mypost\")\n post_text = post_text_selector.xpath('string(.)').extract_first()\n post_loader.add_value(\"content\", post_text[:160])\n\n pub_date_text = raw_post.css(\"div#postcontent > div.no-mobile > div.posttag.right.nomobile > span::text\").extract_first()\n pub_date = parse_date(pub_date_text)\n post_loader.add_value(\"publication_date\", pub_date)\n\n initial_author_list = raw_post.css(\n \"div#postcontent > div.no-mobile > div.postauthor > span > a.goauthor > span::text\").extract()\n author_list = [name.strip() for name in initial_author_list]\n post_authors = \"::\".join(author_list)\n post_loader.add_value(\"author\", post_authors)\n\n post_tags = raw_post.css(\"div#postcontent > a.tag.secondary::attr(title)\").extract()\n post_tags_str = \"::\".join(post_tags)\n post_loader.add_value(\"tags\", post_tags_str)\n\n return post_loader.load_item()", "def convert_to_markdown(self, text: str) -> str:", "def transform(self, actual_tokens): # noqa: C901\n transformed_data = \"\"\n avoid_processing = False\n previous_token = None\n\n for next_token in actual_tokens:\n # pre_transform = transformed_data\n if next_token.token_name == MarkdownToken.token_thematic_break:\n transformed_data += self.rehydrate_thematic_break(next_token)\n elif next_token.token_name == MarkdownToken.token_paragraph:\n transformed_data += self.rehydrate_paragraph(next_token)\n elif next_token.token_name == MarkdownToken.token_indented_code_block:\n transformed_data += self.rehydrate_indented_code_block(next_token)\n elif next_token.token_name == MarkdownToken.token_html_block:\n transformed_data += self.rehydrate_html_block(next_token)\n elif next_token.token_name == MarkdownToken.token_fenced_code_block:\n transformed_data += self.rehydrate_fenced_code_block(next_token)\n elif next_token.token_name == 
MarkdownToken.token_text:\n transformed_data += self.rehydrate_text(next_token)\n elif next_token.token_name == MarkdownToken.token_setext_heading:\n transformed_data += self.rehydrate_setext_heading(next_token)\n elif next_token.token_name == MarkdownToken.token_atx_heading:\n transformed_data += self.rehydrate_atx_heading(next_token)\n elif next_token.token_name == MarkdownToken.token_blank_line:\n transformed_data += self.rehydrate_blank_line(next_token)\n\n elif (\n next_token.token_name == MarkdownToken.token_unordered_list_start\n or next_token.token_name == MarkdownToken.token_ordered_list_start\n or next_token.token_name == MarkdownToken.token_block_quote\n or next_token.token_name\n == MarkdownToken.token_link_reference_definition\n or next_token.token_name == MarkdownToken.token_inline_link\n or next_token.token_name == MarkdownToken.token_inline_image\n ):\n avoid_processing = True\n break\n elif next_token.token_name == MarkdownToken.token_inline_hard_break:\n transformed_data += self.rehydrate_hard_break(next_token)\n elif next_token.token_name == MarkdownToken.token_inline_emphasis:\n transformed_data += self.rehydrate_inline_emphaisis(next_token)\n elif next_token.token_name == MarkdownToken.token_inline_uri_autolink:\n transformed_data += self.rehydrate_inline_uri_autolink(next_token)\n elif next_token.token_name == MarkdownToken.token_inline_email_autolink:\n transformed_data += self.rehydrate_inline_email_autolink(next_token)\n elif next_token.token_name == MarkdownToken.token_inline_raw_html:\n transformed_data += self.rehydrate_inline_raw_html(next_token)\n elif next_token.token_name == MarkdownToken.token_inline_code_span:\n transformed_data += self.rehydrate_inline_code_span(next_token)\n elif next_token.token_name.startswith(EndMarkdownToken.type_name_prefix):\n\n adjusted_token_name = next_token.token_name[\n len(EndMarkdownToken.type_name_prefix) :\n ]\n if adjusted_token_name == MarkdownToken.token_paragraph:\n transformed_data += self.rehydrate_paragraph_end(next_token)\n elif adjusted_token_name == MarkdownToken.token_indented_code_block:\n transformed_data += self.rehydrate_indented_code_block_end(\n next_token\n )\n elif adjusted_token_name == MarkdownToken.token_fenced_code_block:\n transformed_data += self.rehydrate_fenced_code_block_end(\n next_token, previous_token\n )\n elif adjusted_token_name == MarkdownToken.token_html_block:\n transformed_data += self.rehydrate_html_block_end(next_token)\n elif adjusted_token_name == MarkdownToken.token_setext_heading:\n transformed_data += self.rehydrate_setext_heading_end(next_token)\n elif adjusted_token_name == MarkdownToken.token_atx_heading:\n transformed_data += self.rehydrate_atx_heading_end(next_token)\n elif adjusted_token_name == MarkdownToken.token_inline_emphasis:\n transformed_data += self.rehydrate_inline_emphaisis_end(next_token)\n else:\n assert False, \"end_next_token>>\" + str(adjusted_token_name)\n else:\n assert False, \"next_token>>\" + str(next_token)\n\n print(\n \">>>>\"\n + str(next_token)\n + \"\\n---\\n\"\n + transformed_data.replace(\"\\n\", \"\\\\n\").replace(\"\\t\", \"\\\\t\")\n + \"\\n---\"\n )\n previous_token = next_token\n\n if transformed_data and transformed_data[-1] == \"\\n\":\n transformed_data = transformed_data[0:-1]\n return transformed_data, avoid_processing", "def convert_medium_post_to_ghost_json(export_folder, html_filename, post_html_content):\n logging.info(f\"Parsing {html_filename}\")\n\n # Get the publish date and slug from the exported filename\n _, filename = 
html_filename.split(\"/\")\n uuid, slug_from_filename, date, status = parse_medium_filename(filename)\n\n # Extract post-level metadata elements that will be at known elements\n soup = BeautifulSoup(post_html_content, 'html.parser')\n\n # - Article Title\n title = soup.find(\"h1\", {\"class\": \"p-name\"}).text\n if not title:\n title = \"Empty title\"\n # - Subtitle\n subtitle = soup.find(\"section\", {\"data-field\": \"subtitle\"}).text.strip() if soup.find(\"section\", {\"data-field\": \"subtitle\"}) else None\n # - Description\n description = soup.find(\"section\", {\"data-field\": \"description\"}).text.strip() if soup.find(\"section\", {\"data-field\": \"description\"}) else None\n\n # Canonical link\n canonical_link = None\n canonical_link_el = soup.find(\"a\", {\"class\": \"p-canonical\"})\n if canonical_link_el is not None:\n canonical_link = canonical_link_el[\"href\"]\n\n # Use same slugs as Medium, to make sure blog.q42.nl URLs don't break\n if canonical_link is not None:\n slug = canonical_link.split('/')[-1]\n else:\n slug = slug_from_filename\n\n # We will delete @q42 account from Medium, so don't use that as canonical URL\n if canonical_link.startswith(\"https://medium.com/@q42\"):\n canonical_link = None\n\n # Medium stores every comment as full story.\n # Guess if this post was a comment or a post based on if it has a post title h3 or not.\n # If it seems to be a comment, skip converting it since we have no idea what it was a comment on.\n title_el = soup.find(\"h3\", {\"class\": \"graf--title\"})\n\n # Hack: Some really old Medium posts used h2 instead of h3 for the title element.\n if not title_el:\n title_el = soup.find(\"h2\", {\"class\": \"graf--title\"})\n\n # If there's no title element, this document is probably a comment. 
Skip!\n if title_el is None:\n logging.warning(f\"Skipping {html_filename} because it appears to be a Medium comment, not a post!\")\n return None\n\n # All the remaining document-evel attributes we need to collect\n comment_id = None\n plain_text = None\n feature_image = None\n created_at = date\n updated_at = date\n published_at = date\n custom_excerpt = subtitle\n\n # Convert story body itself to mobiledoc format (As required by Ghost)\n parser = MediumHTMLParser()\n parser.feed(post_html_content)\n mobiledoc_post = parser.convert()\n\n # Download all the story's images to local disk cache folder\n first_image_of_post = None\n\n for card in mobiledoc_post[\"cards\"]:\n card_type = card[0]\n if card_type == \"image\":\n data = card[1]\n url = data[\"src\"]\n\n cache_folder = Path(export_folder) / \"downloaded_images\" / slug_from_filename\n new_image_path = download_image_with_local_cache(url, cache_folder)\n\n if first_image_of_post is None:\n first_image_of_post = new_image_path\n\n # TODO: Fix this when Ghost fixes https://github.com/TryGhost/Ghost/issues/9821\n # Ghost 2.0.3 has a bug where it doesn't update imported image paths, so manually add\n # /content/images.\n final_image_path_for_ghost = str(new_image_path).replace(str(export_folder), \"/content/images\")\n data[\"src\"] = final_image_path_for_ghost\n\n # If this image was the story's featured image, grab it.\n # Confusingly, post images ARE updated correctly in 2.0.3, so this path is different\n if \"featured_image\" in data:\n del data[\"featured_image\"]\n feature_image = str(new_image_path).replace(str(export_folder), \"\")\n\n # Set first image as the story's featured image, if there is no featured image in Medium post.\n if feature_image is None:\n feature_image = str(first_image_of_post).replace(str(export_folder), \"\")\n\n # If first section is an image, and we used the first image as featured image as well, remove the first image\n if mobiledoc_post[\"sections\"] and mobiledoc_post[\"sections\"][0] == [10, 0]:\n print(\"Removing first card \" + str(canonical_link_el[\"href\"]))\n del mobiledoc_post[\"sections\"][0]\n\n # Create the final post dictionary as required by Ghost 2.0\n return {\n # \"id\": id,\n \"uuid\": uuid,\n \"title\": title,\n \"slug\": slug,\n \"canonical_url\": canonical_link,\n \"mobiledoc\": json.dumps(mobiledoc_post),\n \"html\": post_html_content,\n \"comment_id\": comment_id,\n \"plaintext\": plain_text,\n \"feature_image\": feature_image,\n \"featured\": 0,\n \"page\": 0,\n \"status\": status,\n \"locale\": None,\n \"visibility\": \"public\",\n \"meta_title\": title,\n \"meta_description\": description,\n \"author_id\": \"1\",\n \"created_at\": created_at,\n \"created_by\": \"1\",\n \"updated_at\": updated_at,\n \"updated_by\": \"1\",\n \"published_at\": published_at,\n \"published_by\": \"1\",\n \"custom_excerpt\": custom_excerpt,\n \"codeinjection_head\": None,\n \"codeinjection_foot\": None,\n \"custom_template\": None,\n\n # These all inherit from the metadata title/description in Ghost, so no need to set them explicitly\n \"og_image\": None,\n \"og_title\": None,\n \"og_description\": None,\n \"twitter_image\": None,\n \"twitter_title\": None,\n \"twitter_description\": None,\n }", "def render():\n\n path = request.args.get('markdown', '')\n raw = request.args.get('raw', False)\n\n username, user_id = g.user.username, g.user.id\n\n tmpl = 'markdown-rendered.html'\n if raw:\n tmpl = 'markdown-raw.html'\n elif request.args.get('presentation'):\n # TODO(dan?) 
fix presentation post\n # presentation_post = {}\n # presentation_post['authors_string'] = post.author_string\n # presentation_post['tldr'] = post.tldr\n # presentation_post['html'] = html\n # html = create_presentation_text(presentation_post)\n tmpl = \"markdown-presentation.html\"\n\n if not current_app.config.get('REPOSITORY_INDEXING_ENABLED', True):\n return _render_preview(path=path, tmpl=tmpl)\n\n post = (db_session.query(Post)\n .filter(Post.path == path)\n .first())\n if not post:\n knowledge_aliases = current_repo.config.aliases\n if path in knowledge_aliases:\n # TODO: reframe as redirect\n post = (db_session.query(Post)\n .filter(Post.path == knowledge_aliases[path])\n .first())\n if not post:\n raise Exception(\"unable to find post at {}\".format(path))\n\n if post.contains_excluded_tag:\n # It's possible that someone gets a direct link to a post that has an excluded tag\n return render_template(\"error.html\")\n\n if post.private:\n groups = post.groups\n users = set()\n for group in groups:\n user_ids = [user.id for user in group.users]\n users.update(user_ids)\n if user_id not in users and username not in current_repo.config.editors:\n return render_template(\"permission_ask.html\", authors=post.authors_string)\n\n html = render_post(post)\n raw_post = render_post_raw(post) if raw else None\n\n comments = post.comments\n for comment in comments:\n comment.author = db_session.query(User).filter(User.id == comment.user_id).first().username\n if not raw:\n comment.text = render_comment(comment)\n\n user_obj = (db_session.query(User)\n .filter(User.id == user_id)\n .first())\n\n tags_list = [str(t.name) for t in post.tags]\n user_subscriptions = [str(s) for s in user_obj.get_subscriptions]\n\n is_author = user_id in [author.id for author in post.authors]\n\n rendered = render_template(tmpl,\n html=html,\n post_id=post.id,\n post_path=path,\n raw_post=raw_post,\n comments=comments,\n username=username,\n post_author=post.authors_string,\n title=post.title,\n page_views=post.view_count,\n unique_views=post.view_user_count,\n likes=post.vote_counted_for_user(user_id=user_id),\n total_likes=post.vote_count,\n tags_list=tags_list,\n user_subscriptions=user_subscriptions,\n webeditor_buttons=False,\n web_uri=post.kp.web_uri,\n table_id=None,\n is_private=(post.private == 1),\n is_author=is_author)\n return rendered", "def sentence_parse(list_of_posts): \n for parsedPosts in nlp.pipe(line_review(list_of_posts)):\n for sent in parsedPosts.sents:\n yield str(sent)", "def on_page_markdown(self, markdown, **kwargs):\n for autolink in self.config[\"autolinks\"]:\n markdown = replace_autolink_references(markdown, autolink[\"reference_prefix\"], autolink[\"target_url\"])\n\n return markdown", "def parse_markdown(tokens):\r\n body = Body_Parser(tokens)\r\n if body.consumed != -1 + tokens.length():\r\n if not tokens.grab(body.consumed-1).context == \"EOF\":\r\n list = tokens.grab_num(body.consumed-3, 5)\r\n context = \"\"\r\n for i in list:\r\n context += i.context + \"\\n\"\r\n click.secho(\r\n \"error at %s\\n%s\" % (tokens.grab(body.consumed-1).at, context), fg=\"red\",\r\n err=True)\r\n return body", "def run_get_post(m):\n\n doc = get_doc(m)\n assert doc is not None\n\n wp = get_wp(m)\n\n post = find_post(wp, doc.identifier)\n\n if post:\n post.content = \"…content elided…\"\n from pprint import pprint\n pprint(post.struct)\n return\n else:\n warn(f\"Didn't find post for identifier {doc.identifier}\")\n return", "def post(section, title=None, filename=None):\n if not 
os.path.exists(os.path.join(FLATPAGES_ROOT, section)):\n raise CommandError(u\"Section '%s' does not exist\" % section)\n post_date = datetime.datetime.today()\n title = unicode(title) if title else \"Untitled Post\"\n if not filename:\n filename = u\"%s.md\" % slugify(title)\n year = post_date.year\n pathargs = [section, str(year), filename]\n filepath = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), FLATPAGES_ROOT, \"/\".join(pathargs)\n )\n if os.path.exists(filepath):\n raise CommandError(\"File %s exists\" % filepath)\n content = \"\\n\".join(\n [\n u\"title: %s\" % title,\n u\"date: %s\" % post_date.strftime(\"%Y-%m-%d\"),\n u\"published: false\\n\\n\",\n ]\n )\n try:\n codecs.open(filepath, \"w\", encoding=\"utf8\").write(content)\n print(u\"Created %s\" % filepath)\n except Exception as error:\n raise CommandError(error)", "def html_from_markdown(content): \n\n \"\"\"\n Bold \n \"\"\" \n # Convert to <strong></strong>\n regx = re.compile(r\"^\\*\\*(.*?)\\*\\*\", re.MULTILINE)\n content = regx.sub(r\"<strong>\\1</strong>\",content) \n\n \"\"\"\n Link \n \"\"\" \n # Convert to <a>\n regx = re.compile(r\"\\[(.*)\\]\\((.*)\\)\", re.MULTILINE)\n content = regx.sub(r\"<a href=\\2>\\1</a>\",content) \n\n \"\"\"\n Paragraph \n \"\"\" \n new_content = \"\"\n for line in content.splitlines():\n line = re.sub(r'^(?!#|\\*)(.+)', r'<p>\\1</p>', line)\n new_content = new_content + line + \"\\n\"\n content = new_content\n\n \"\"\"\n Unordered lists\n \"\"\" \n new_content = \"\" \n u_list = False\n for line in content.splitlines():\n\n if len(line) > 0: # Check the line is not empty\n\n l = line[:2]\n if u_list and l!=\"* \": # check if there and unordered list to be closed.\n new_content = new_content + \"</ul>\"\n u_list = False # Flag indicates the unordered list has finished\n\n #if line[0]!=\"#\" and line[0]!=\"*\": # Add the paragraph to the line\n # line = \"<p>\" + line + \"</p>\\n\"\n\n if line[:2]==\"* \": # Check if the lins is an unordered list\n if not u_list: # Check if it´s the first item of the list\n line = \"<ul><li>\" + line [2:] + \"</li>\"\n u_list = True # Flag indicates the unordered list has started.\n else:\n line = \"<li>\" + line [2:] + \"</li>\"\n\n new_content = new_content + line + \"\\n\"\n\n if u_list : # in case still have an unordered list to be closed.\n new_content = new_content + \"</ul>\"\n\n content = new_content\n\n \"\"\"\n Headers \n \"\"\" \n # Convert to h1\n regx = re.compile(r\"^#\\s(.*?)\\n\", re.MULTILINE)\n content = regx.sub(r\"<h1>\\1</h1>\\n\",content) \n\n # Convert to h2\n regx = re.compile(r\"^##\\s(.*?)\\n\", re.MULTILINE)\n content = regx.sub(r\"<h2>\\1</h2>\\n\",content) \n\n # Convert to h3\n regx = re.compile(r\"^###\\s(.*?)\\n\", re.MULTILINE)\n content = regx.sub(r\"<h3>\\1</h3>\\n\",content) \n\n # Convert to h4\n regx = re.compile(r\"^####\\s(.*?)\\n\", re.MULTILINE)\n content = regx.sub(r\"<h4>\\1</h4>\\n\",content) \n\n # Convert to h5\n regx = re.compile(r\"^#####\\s(.*?)\\n\", re.MULTILINE)\n content = regx.sub(r\"<h5>\\1</h5>\\n\",content) \n\n # Convert to h6\n regx = re.compile(r\"^######\\s(.*?)\\n\", re.MULTILINE) \n content = regx.sub(r\"<h6>\\1</h6>\\n\",content) \n\n\n return content", "def add(self, posts):\n for post in posts:\n self._feed.add(FeedEntry(\n summary=post.summary,\n title=post.title,\n title_type='html',\n url=post.url,\n updated=post.date,\n ))", "def post_archive(*args, **kwargs):\n return Post.objects.get_blog_posts().dates('publication_date', 'month')", "def post(title, drafts=False):\n # 
Parsing options\n if drafts:\n dest = _drafts_dest\n else:\n dest = _posts_dest\n\n # File name\n date = get_date()\n name = sanitize(title)\n fname = \"{}-{}{}\".format(date, name, _post_ext)\n\n # Front Matter\n front_matter = []\n front_matter.append('---')\n front_matter.append('layout: post')\n front_matter.append('title: {}'.format(title))\n front_matter.append('description: \"\"')\n front_matter.append('tags: \"\"')\n front_matter.append('comments: true')\n front_matter.append('permalink: \"\"')\n front_matter.append('sitemap:\\n lastmod: {}'.format(date))\n front_matter.append('---')\n\n # Create post file and write Front Matter\n print(\"\\nCreating new post '{}' in {}\\n\".format(fname, dest))\n try:\n f = open(dest + fname, 'w')\n except Exception as e:\n print(\"* [Error] occured: {}\\n\".format(e))\n else:\n f.write('\\n'.join(front_matter))\n f.close()\n print(\"* Done.\\n\")", "def parse(content):\r\n soup = BeautifulSoup(content)\r\n submissions = soup.findAll('div')\r\n\r\n submission_data = []\r\n for s in submissions:\r\n t = s.getText()\r\n try:\r\n num, date, score = num_date_pattern.findall(t)[0]\r\n num = s.find('b').getText()\r\n data = json.loads(unescape(s.find('pre').getText()))\r\n d = {'submission_num': num,\r\n 'date': date,\r\n 'score': score,\r\n 'detail': data}\r\n submission_data.append(d)\r\n except Exception, e:\r\n logging.error(\"Error finding num_date_pattern: %s. Text:\\n%s\" % (e, t))\r\n return submission_data", "def load_post(path: Path) -> LocalPost:\n md_path = path / \"post.md\"\n if not md_path.exists() or not md_path.is_file():\n raise ValueError(f\"Could not find a post.md file in {path.absolute()}\")\n meta_path = path / \"post-meta.json\"\n if not meta_path.exists() or not meta_path.is_file():\n raise ValueError(f\"Could not find a post-meta.json file in {path.absolute()}\")\n try:\n post_meta = PostMeta.parse_from_file(meta_path)\n except ValueError as e:\n raise e\n return LocalPost(path, post_meta)", "def iter_markdown_lines(markdown_html):\n nodes = get_markdown_element_tree(markdown_html)\n\n for node in nodes:\n if node.nodeType == node.ELEMENT_NODE:\n if (node.tagName == 'div' and\n node.attributes.get('class', 'codehilite')):\n # This is a code block, which will consist of a bunch of lines\n # for the source code. We want to split that up into\n # individual lines with their own <pre> tags.\n for line in node.toxml().splitlines():\n yield '<pre>%s</pre>' % line\n elif node.tagName in ('ul', 'ol'):\n # This is a list. We'll need to split all of its items\n # into individual lists, in order to retain bullet points\n # or the numbers.\n #\n # For the case of numbers, we can set each list to start\n # at the appropriate number so that they don't all say \"1.\"\n i = node.attributes.get('start', 1)\n\n for child_node in node.childNodes:\n if (child_node.nodeType == child_node.ELEMENT_NODE and\n child_node.tagName == 'li'):\n # This is a list item element. 
It may be multiple\n # lines, but we'll have to treat it as one line.\n yield '<%s start=\"%s\">%s</%s>' % (\n node.tagName, i, child_node.toxml(),\n node.tagName)\n\n i += 1\n elif node.tagName == 'p':\n # This is a paragraph, possibly containing multiple lines.\n for line in node.toxml().splitlines():\n yield line\n else:\n # Whatever this is, treat it as one block.\n yield node.toxml()\n elif node.nodeType == node.TEXT_NODE:\n # This may be several blank extraneous blank lines, due to\n # Markdown's generation from invisible markup like fences.\n # We want to condense this down to one blank line.\n yield '\\n'", "def serializePostsData(influencer, posts, length_limit=30, highlight=False):\n from debra import serializers\n\n posts_data = []\n urls = set()\n posts = list(posts)\n dated = []\n undated = []\n for post in posts:\n if post.create_date:\n dated.append(post)\n else:\n undated.append(post)\n\n posts = sorted(dated, key=lambda x: x.create_date)\n posts.reverse()\n posts.extend(undated)\n\n if length_limit:\n length_limit = length_limit\n\n for post in posts:\n if post.url in urls:\n continue\n urls.add(post.url)\n post_data = {}\n post_data[\"post_image\"] = post.post_image\n stripped_content, images = tagStripper(\n post.content, length_limit=length_limit)\n post_data[\"content\"] = stripped_content\n post_data[\"content_images\"] = images\n post_data[\"url\"] = post.url\n post_data[\"blog_name\"] = serializers.unescape(influencer.blogname if influencer else\\\n post.influencer.blogname)\n post_data[\"title\"] = post.title\n post_data[\"platform\"] = get_post_platform(post)\n if highlight:\n post_data[\"highlight\"] = True\n if post.create_date:\n post_data[\"create_date\"] = post.create_date.strftime(\"%b. %e, %Y\")\n if not influencer:\n post_data['user'] = post.influencer.feed_stamp\n if post.products_json:\n post_data[\"products\"] = post.get_product_json()\n else:\n post_data[\"products\"] = []\n posts_data.append(post_data)\n return posts_data", "def split_markdown(source: str) -> List[Dict[str, str]]:\n cells: List[Dict] = []\n in_code = False\n in_tab = False\n cur_code_mark = None\n cur_tag = None\n cur_src = []\n\n def _add_cell(cur_src: List[str], cells: List[Dict]):\n if cur_src:\n src = '\\n'.join(cur_src).strip()\n if in_code:\n cells.append({\n 'type': 'code',\n 'fence': cur_code_mark,\n 'class': cur_tag,\n 'source': src})\n else:\n if not src and not cur_tag:\n return\n cells.append({'type': 'markdown', 'source': src})\n if cur_tag:\n cells[-1]['class'] = cur_tag\n\n for l in source.splitlines():\n code = common.md_code_fence.match(l)\n tab = common.md_mark_pattern.match(l)\n if code:\n # code can be nested\n if in_tab or (in_code and code.groups()[0] != cur_code_mark):\n cur_src.append(l)\n else:\n _add_cell(cur_src, cells)\n cur_src = []\n cur_code_mark, cur_tag = code.groups()\n in_code ^= True\n elif tab:\n begin = tab.groups()[0] == 'begin_tab'\n end = tab.groups()[0] == 'end_tab'\n if in_code or (not begin and not end):\n cur_src.append(l)\n else:\n _add_cell(cur_src, cells)\n cur_src = []\n if begin:\n cur_tag = tab.groups()[1]\n else:\n cur_tag = None\n in_tab = begin\n else:\n cur_src.append(l)\n _add_cell(cur_src, cells)\n return cells", "def post(id):\n post = get_post(id)\n title = post[\"title\"]\n body = post[\"body\"]\n unicode_body = body.decode(\"utf-8\")\n html_body = markdown.markdown(unicode_body)\n safe_html_body = Markup(html_body)\n context = {\n \"title\": title,\n \"body\": safe_html_body,\n \"url\": url_for(\"post\", id=id)\n }\n return 
render_template(\"post.html\", **context)", "def extract_instagram_posts(self, nodes):\n posts = []\n for node in nodes:\n try:\n post = dict()\n post['dimensions'] = dict()\n post['dimensions']['width'] = node['node']['dimensions']['width']\n post['dimensions']['height'] = node['node']['dimensions']['height']\n post['user'] = self.extract_owner_details(node['node'][\"owner\"])\n post['postId'] = node['node']['id']\n post['code'] = node['node']['shortcode']\n post['caption'] = node['node']['edge_media_to_caption']['edges'][0]['node']['text'] if len(\n node['node']['edge_media_to_caption']['edges']) > 0 else None\n if post['caption'] is not None:\n post['hashTags'] = [re.sub(r'\\W+', '', word) for word in post['caption'].split() if\n word.startswith(\"#\")]\n else:\n post['hashTags'] = []\n post['comments'] = node['node']['edge_media_to_comment']\n post['likes'] = node['node']['edge_liked_by']\n post['imgSmall'] = node['node'][\"thumbnail_src\"]\n post['imgLarge'] = node['node'][\"display_url\"]\n post['postedAt'] = node['node'][\"taken_at_timestamp\"]\n post['isVideo'] = node['node'][\"is_video\"]\n\n if not set(post['hashTags']).isdisjoint(set(_config['instagram']['excluded'])):\n # contains blocked hashtag, skip\n continue\n\n posts.append(post)\n except KeyError as e:\n log.error(\"Problems parsing post {}\".format(str(e)))\n return posts", "def format_markdown(md_content: str) -> str:\n tag_index = md_content.find('tags')\n if tag_index > -1:\n meta_end = md_content.find(META_SEPARATOR)\n r = format_meta(md_content[:meta_end])\n return r + md_content[meta_end:]\n return md_content", "def convert(md_text):\n # separate by line\n md_text = md_text.split('\\n')\n\n # save the html content for return\n html_text = ''\n\n # begin looping from the first line\n index = -1\n while index < len(md_text) - 1:\n index += 1\n line = md_text[index]\n\n # code segment\n if len(line) >= 3 and line[:3] == '```':\n html_line = \"\"\n language = line[3:].replace(' ', '')\n if len(language) == 0:\n language = False\n order_index = index + 1\n find_end = False\n while order_index < len(md_text):\n if md_text[order_index][:3] == '```':\n find_end = True\n break\n else:\n temp_line = md_text[order_index]\n temp_line = temp_line.replace('<', '&lt;')\n temp_line = temp_line.replace('>', '&gt;')\n temp_line = temp_line.replace(' ', '&nbsp;')\n html_line += temp_line + '<br />'\n order_index += 1\n\n if find_end:\n # if language is not False:\n # html_text += ('<pre><code class=\"' + language + '\">' + html_line + '</code></pre>')\n # else:\n html_text += ('<code>' + html_line + '</code>')\n # print(language)\n index = order_index\n continue\n\n # inline code\n\n\n # header\n is_header, html_line = check_header(line)\n if is_header:\n html_text = html_text + html_line\n continue\n\n # horizontal rule\n is_horizontal_rule, html_line = check_horizontal_rule(line)\n if is_horizontal_rule:\n html_text = html_text + html_line\n continue\n\n # paragraph\n line = check_paragraph(line)\n\n # deal with ordered list\n if len(line.split('.')) != 0 and '1.' 
== line[:2]:\n html_line = '<ol>'\n order_index = index\n while order_index < len(md_text)\\\n and len(md_text[order_index].split('.')) != 0\\\n and (str(order_index - index + 1) == md_text[order_index].split('.')[0]\n or '1' == md_text[order_index].split('.')[0]):\n to_replace = [str(order_index - index + 1) + '.', '1.']\n for replace_content in to_replace:\n md_text[order_index] = md_text[order_index].replace(replace_content, '')\n html_line = html_line + '<li>' + md_text[order_index] + '</li>'\n\n order_index += 1\n index = order_index - 1\n html_line = html_line + '</ol>'\n line = html_line\n\n # deal with unordered list\n is_unordered_list, html_line = check_unordered_list(line)\n if is_unordered_list:\n line = html_line\n\n # deal with strong\n line = strong(line)\n\n # Scratch\n line = scratch(line)\n\n # italics\n line = italics(line)\n\n # image\n while len(re.match(r'((?P<pre_text>.*)!\\[(?P<alt_text>.*)\\]\\((?P<link>.*)\\)(?P<after_text>.*))*', line).group())\\\n != 0:\n match = re.match(r'((?P<pre_text>.*)!\\[(?P<alt_text>.*)\\]\\((?P<link>.*)\\)(?P<after_text>.*))*', line)\n pre_text = match.group('pre_text')\n alt_text = match.group('alt_text')\n link = match.group('link')\n after_text = match.group('after_text')\n img_html = '<img src=\"' + link + '\" alt=\"' + alt_text + '\">'\n line = pre_text + img_html + after_text\n\n # link\n while len(re.match(r'((?P<pre_text>.*)\\[(?P<alt_text>.*)\\]\\((?P<link>.*)\\)(?P<after_text>.*))*', line).group())\\\n != 0:\n match = re.match(r'((?P<pre_text>.*)\\[(?P<alt_text>.*)\\]\\((?P<link>.*)\\)(?P<after_text>.*))*', line)\n pre_text = match.group('pre_text')\n alt_text = match.group('alt_text')\n link = match.group('link')\n after_text = match.group('after_text')\n img_html = '<a href=\"' + link + '\">' + alt_text + '</a>'\n line = pre_text + img_html + after_text\n\n html_text = html_text + line\n if not is_unordered_list:\n html_text = html_text + '<br>'\n\n return html_text", "def parse_post(post):\n # Ignore posts less than 24 hours old\n if time.time() - post.created < 60 * 60 * 24:\n logger.debug('Ignoring post (too new)')\n return False\n\n # Add post to database\n postid_db = db.insert('Posts',\n (None,\n post.id,\n post.title,\n post.url,\n post.selftext,\n post.author,\n post.permalink,\n post.subreddit,\n post.num_comments,\n post.upvotes,\n post.downvotes,\n post.score,\n post.created_utc,\n int(post.is_self),\n int(post.over_18)))\n # If post already exists, we've already indexed it; skip!\n if postid_db == -1:\n logger.debug('Ignoring post (already indexed)')\n return False\n # Write post to DB so we don't hit it again\n\n # NOTE: postid_db is the ID of the post in the database; NOT on reddit\n\n # Check for self-post\n if post.selftext != '':\n urls = get_links_from_body(post.selftext)\n for url in urls:\n parse_url(url, postid=postid_db)\n else:\n # Attempt to retrieve hash(es) from link\n parse_url(post.url, postid=postid_db)\n\n # Iterate over top-level comments\n if post.num_comments > 0:\n reddit.fetch_comments(post)\n for comment in post.comments:\n parse_comment(comment, postid_db)", "def produce_aggregated_post_page(name, lp, this, prev, next, main_page=\"Blog\",\n rst_links_up=None, rst_links_down=None,\n index_terms=None, bold_title=None, language=\"en\"):\n direction = \"|rss_image| \"\n if prev is not None:\n direction += f\":ref:`<== <{prev}>` \"\n if bold_title is not None:\n if len(direction) > 0:\n direction += \" \"\n direction += f\"**{bold_title}**\"\n if next is not None:\n if len(direction) > 0:\n 
direction += \" \"\n direction += f\":ref:`==> <{next}>`\"\n arrows = direction\n if main_page is not None:\n if len(direction) > 0:\n direction += \" \"\n direction += f\":ref:`{main_page} <ap-main-0>`\"\n if rst_links_up is not None:\n if len(direction) > 0:\n direction += \" \"\n direction += \" \".join(rst_links_up)\n\n rows = []\n rows.append(\"\")\n rows.append(\":orphan:\")\n rows.append(\"\")\n rows.append(direction)\n rows.append(\"\")\n rows.append(\".. |rss_image| image:: feed-icon-16x16.png\")\n rows.append(\" :target: ../_downloads/rss.xml\")\n rows.append(\" :alt: RSS\")\n rows.append(\"\")\n rows.append(\"----\")\n rows.append(\"\")\n\n if index_terms is not None:\n rows.append(\"\")\n rows.append(\".. index:: \" + \",\".join(index_terms))\n rows.append(\"\")\n\n rows.append(\"\")\n rows.append(f\".. _{this}:\")\n rows.append(\"\")\n\n if bold_title is not None:\n rows.append(bold_title)\n rows.append(\"+\" * len(bold_title))\n rows.append(\"\")\n\n for post in lp:\n text = post.post_as_rst(language=language, cut=True)\n rows.append(text)\n rows.append(\"\")\n rows.append(\"\")\n\n rows.append(\"\")\n rows.append(\"----\")\n rows.append(\"\")\n if rst_links_down is not None:\n if len(arrows) > 0:\n arrows += \" \"\n arrows += \" \".join(rst_links_down)\n rows.append(arrows)\n\n return \"\\n\".join(rows)", "def postList(posts):\n post_list = list()\n for post in posts:\n visible_to = list()\n visible = post.visibleTo.all()\n if visible:\n for author in visible:\n auth = \"{}/api/author/{}\".format(DOMAIN, author.id)\n visible_to.append(auth)\n\n comments = commentList(post)\n comment_url = \"{}/api/posts/{}/comments\".format(DOMAIN, post.id)\n post_dict = {'author': addAuthor(post.author), 'title': post.title, 'description': post.description,\n 'contentType': post.contentType, 'content': post.content, 'published': post.published,\n 'visibility': post.visibility, 'visibleTo': visible_to, 'unlisted': post.unlisted, 'id': post.id,\n 'comments': comments[:5], 'next': comment_url, 'count': len(comments),\n 'origin': \"{}/api/posts/{}\".format(DOMAIN, post.id),\n 'source': \"{}/api/posts/{}\".format(DOMAIN, post.id)}\n post_list.append(post_dict)\n return post_list", "def _split(self):\n text = self.md\n self.parts = parts = []\n self.headers = headers = []\n lines = []\n\n # Split in parts\n for line in text.splitlines():\n if line.startswith((\"# \", \"## \", \"### \", \"#### \", \"##### \")):\n # Finish pending lines\n parts.append(\"\\n\".join(lines))\n lines = []\n # Process header\n level = len(line.split(\" \")[0])\n title = line.split(\" \", 1)[1]\n title_short = title.split(\"(\")[0].split(\"<\")[0].strip().replace(\"`\", \"\")\n headers.append((level, title_short))\n parts.append((level, title_short, title))\n else:\n lines.append(line)\n parts.append(\"\\n\".join(lines))\n\n # Now convert all text to html\n for i in range(len(parts)):\n if not isinstance(parts[i], tuple):\n parts[i] = markdown.markdown(parts[i], extensions=[]) + \"\\n\\n\"", "def gen_md(self):\n # https://pythonhosted.org/Markdown/extensions/index.html\n extensions = ['extra', 'codehilite', 'admonition',\n 'toc', 'smarty', 'sane_lists', 'wikilinks']\n # TODO\n extension_configs = {'toc': {\n 'anchorlink': False,\n 'permalink': False\n }\n }\n output_format = 'html5'\n md = markdown.Markdown(extensions=extensions,\n extension_configs=extension_configs,\n output_format=output_format)\n html = md.convert(self.md)\n toc = getattr(md, 'toc', '')\n if toc:\n toc = process_toc(toc)\n return html, toc", "def 
htmlForMarkdown(md):\n return mdProcessor.convert(md)", "def rmd_to_cells(rmd_string):\n cells, cell_lines, cell_type, in_block, in_begin = [], [], \"markdown\", False, False\n for line in rmd_string.split(\"\\n\"):\n if in_block and (line.strip() == \"```\" or re.match(END_REGEX, line)):\n in_block = False\n\n # collect cell_lines into a new cell\n cell = create_cell(cell_type, \"\\n\".join(cell_lines + [line]))\n cells.append(cell)\n cell_type, cell_lines = \"markdown\", []\n\n elif line.startswith(\"```\") or re.match(BEGIN_REGEX, line):\n in_block = True\n\n # collect cell_lines into a new cell\n if cell_lines:\n cell = create_cell(cell_type, \"\\n\".join(cell_lines))\n cells.append(cell)\n cell_type = \"code\" if line.startswith(\"```{r\") and \"}\" in line else \"markdown\"\n cell_lines = [line]\n\n else:\n cell_lines.append(line)\n\n # collect remaining cell lines into a new cell\n if cell_lines:\n cell = create_cell(cell_type, \"\\n\".join(cell_lines))\n cells.append(cell)\n\n return cells", "def post_create(faker_obj, profile_obj, tag_list, num=3):\n for i in range(num):\n obj = faker_obj\n title = obj.sentence(nb_words=random.randint(5, 10))\n author = User.objects.get(id=profile_obj)\n body = \" \".join(obj.paragraphs(nb=random.randint(8, 20)))\n status = \"published\"\n post = Post.objects.create(title=title, author=author, body=body, status=status)\n post.tags.add(\", \".join(random.sample(tag_list, 1)))\n print(\n \"Created post title:'{}' for user '{}'\".format(post.title, author.username)\n )\n create_comment_list(obj, post)", "def process_postlist(app, doctree, docname):\n blog = Blog(app)\n if not blog:\n register_posts(app)\n for node in doctree.findall(PostList):\n colls = []\n for cat in [\"tags\", \"author\", \"category\", \"location\", \"language\"]:\n for coll in node[cat]:\n if coll in blog.catalogs[cat].collections:\n colls.append(blog.catalogs[cat].collections[coll])\n if colls:\n posts = set(blog.posts)\n for coll in colls:\n posts = posts & set(coll)\n posts = list(posts)\n posts.sort(reverse=True)\n posts = posts[: node.attributes[\"length\"]]\n else:\n posts = list(blog.recent(node.attributes[\"length\"], docname, **node.attributes))\n if node.attributes[\"sort\"]:\n posts.sort() # in reverse chronological order, so no reverse=True\n fmts = list(Formatter().parse(node.attributes[\"format\"]))\n not_in = {\"date\", \"title\", \"author\", \"location\", \"language\", \"category\", \"tags\", None}\n for text, key, __, __ in fmts:\n if key not in not_in:\n raise KeyError(f\"{key} is not recognized in postlist format\")\n excerpts = node.attributes[\"excerpts\"]\n expand = node.attributes[\"expand\"]\n date_format = node.attributes[\"date\"] or _(blog.post_date_format_short)\n bl = nodes.bullet_list()\n bl.attributes[\"classes\"].append(\"postlist-style-\" + node[\"list-style\"])\n bl.attributes[\"classes\"].append(\"postlist\")\n for post in posts:\n bli = nodes.list_item()\n bli.attributes[\"classes\"].append(\"ablog-post\")\n bl.append(bli)\n par = nodes.paragraph()\n bli.append(par)\n for text, key, __, __ in fmts:\n if text:\n par.append(nodes.Text(text))\n if key is None:\n continue\n if key == \"date\":\n par.append(nodes.Text(post.date.strftime(date_format)))\n else:\n if key == \"title\":\n items = [post]\n else:\n items = getattr(post, key)\n\n for i, item in enumerate(items, start=1):\n if key == \"title\":\n ref = nodes.reference()\n if item.options.get(\"external_link\"):\n ref[\"refuri\"] = post.options.get(\"external_link\")\n else:\n ref[\"refuri\"] 
= app.builder.get_relative_uri(docname, item.docname)\n ref[\"internal\"] = True\n ref[\"ids\"] = []\n ref[\"backrefs\"] = []\n ref[\"dupnames\"] = []\n ref[\"classes\"] = []\n ref[\"names\"] = []\n ref.append(nodes.Text(str(item)))\n par.attributes[\"classes\"].append(\"ablog-post-title\")\n else:\n ref = _missing_reference(app, item.xref, docname)\n par.append(ref)\n if i < len(items):\n par.append(nodes.Text(\", \"))\n if excerpts and post.excerpt:\n for enode in post.excerpt:\n enode = enode.deepcopy()\n enode.attributes[\"classes\"].append(\"ablog-post-excerpt\")\n revise_pending_xrefs(enode, docname)\n app.env.resolve_references(enode, docname, app.builder)\n enode.parent = bli.parent\n bli.append(enode)\n if expand:\n ref = app.builder.get_relative_uri(docname, post.docname)\n enode = nodes.paragraph()\n enode.attributes[\"classes\"].append(\"ablog-post-expand\")\n refnode = nodes.reference(\"\", \"\", internal=True, refuri=ref)\n innernode = nodes.emphasis(text=expand)\n refnode.append(innernode)\n enode.append(refnode)\n bli.append(enode)\n node.replace_self(bl)", "def render_markdown(text):\n return markdown(text, **MARKDOWN_KWARGS)", "def dc2fields(file):\r\n try:\r\n from bs4 import BeautifulSoup\r\n except ImportError:\r\n error = ('Missing dependency '\r\n '\"BeautifulSoup4\" and \"lxml\" required to import Dotclear files.')\r\n sys.exit(error)\r\n\r\n\r\n in_cat = False\r\n in_post = False\r\n category_list = {}\r\n posts = []\r\n\r\n with open(file, 'r', encoding='utf-8') as f:\r\n\r\n for line in f:\r\n # remove final \\n\r\n line = line[:-1]\r\n\r\n if line.startswith('[category'):\r\n in_cat = True\r\n elif line.startswith('[post'):\r\n in_post = True\r\n elif in_cat:\r\n fields = line.split('\",\"')\r\n if not line:\r\n in_cat = False\r\n else:\r\n # remove 1st and last \"\"\r\n fields[0] = fields[0][1:]\r\n # fields[-1] = fields[-1][:-1]\r\n category_list[fields[0]]=fields[2]\r\n elif in_post:\r\n if not line:\r\n in_post = False\r\n break\r\n else:\r\n posts.append(line)\r\n\r\n print(\"%i posts read.\" % len(posts))\r\n\r\n for post in posts:\r\n fields = post.split('\",\"')\r\n\r\n # post_id = fields[0][1:]\r\n # blog_id = fields[1]\r\n # user_id = fields[2]\r\n cat_id = fields[3]\r\n # post_dt = fields[4]\r\n # post_tz = fields[5]\r\n post_creadt = fields[6]\r\n # post_upddt = fields[7]\r\n # post_password = fields[8]\r\n # post_type = fields[9]\r\n post_format = fields[10]\r\n # post_url = fields[11]\r\n # post_lang = fields[12]\r\n post_title = fields[13]\r\n post_excerpt = fields[14]\r\n post_excerpt_xhtml = fields[15]\r\n post_content = fields[16]\r\n post_content_xhtml = fields[17]\r\n # post_notes = fields[18]\r\n # post_words = fields[19]\r\n # post_status = fields[20]\r\n # post_selected = fields[21]\r\n # post_position = fields[22]\r\n # post_open_comment = fields[23]\r\n # post_open_tb = fields[24]\r\n # nb_comment = fields[25]\r\n # nb_trackback = fields[26]\r\n post_meta = fields[27]\r\n # redirect_url = fields[28][:-1]\r\n\r\n # remove seconds\r\n post_creadt = ':'.join(post_creadt.split(':')[0:2])\r\n\r\n author = \"\"\r\n categories = []\r\n tags = []\r\n\r\n if cat_id:\r\n categories = [category_list[id].strip() for id in cat_id.split(',')]\r\n\r\n # Get tags related to a post\r\n tag = post_meta.replace('{', '').replace('}', '').replace('a:1:s:3:\\\\\"tag\\\\\";a:', '').replace('a:0:', '')\r\n if len(tag) > 1:\r\n if int(tag[:1]) == 1:\r\n newtag = tag.split('\"')[1]\r\n tags.append(\r\n BeautifulSoup(\r\n newtag\r\n , \"xml\"\r\n )\r\n # bs4 
always outputs UTF-8\r\n .decode('utf-8')\r\n )\r\n else:\r\n i=1\r\n j=1\r\n while(i <= int(tag[:1])):\r\n newtag = tag.split('\"')[j].replace('\\\\','')\r\n tags.append(\r\n BeautifulSoup(\r\n newtag\r\n , \"xml\"\r\n )\r\n # bs4 always outputs UTF-8\r\n .decode('utf-8')\r\n )\r\n i=i+1\r\n if j < int(tag[:1])*2:\r\n j=j+2\r\n\r\n \"\"\"\r\n dotclear2 does not use markdown by default unless you use the markdown plugin\r\n Ref: http://plugins.dotaddict.org/dc2/details/formatting-markdown\r\n \"\"\"\r\n if post_format == \"markdown\":\r\n content = post_excerpt + post_content\r\n else:\r\n content = post_excerpt_xhtml + post_content_xhtml\r\n content = content.replace('\\\\n', '')\r\n post_format = \"html\"\r\n\r\n kind = 'article' # TODO: Recognise pages\r\n\r\n yield (post_title, content, slugify(post_title), post_creadt, author,\r\n categories, tags, kind, post_format)", "def create_multiple_posts(author, num, ptext = TEXT, visibility = ACL_DEFAULT):\n posts = []\n\n for i in range(num):\n posts.append(Post.objects.create(content = ptext, author = author, visibility=visibility))\n\n return posts", "def markdown_cells(notebook):\n cells = all_cells(notebook)\n return [cell[\"source\"] for cell in cells if cell[\"cell_type\"] == \"markdown\"]", "def articles():\n entries = []\n cur = g.db.execute(\n \"\"\"\n SELECT entries.location FROM categories\n INNER JOIN entries ON\n entries.slug = categories.slug AND\n entries.published = categories.published\n WHERE categories.category='{category}'\n ORDER BY entries.published DESC\n \"\"\".format(category='article'))\n\n for (row,) in cur.fetchall():\n if os.path.exists(row+\".md\"):\n entries.append(file_parser(row+\".md\"))\n return render_template('blog_entries.html', entries=entries)", "def get_all_posts(self, *fields):\n if fields:\n posts = self.collection.find(projection=fields)\n else:\n posts = self.collection.find()\n\n for post in posts.sort('created_datetime', -1):\n yield BlogPost(\n title=post['title'],\n content=post['content'],\n created_datetime=post['created_datetime']\n )", "def get_posts(account, pages=10, timeout=5, sleep=0):\n\n url = f'{_base_url}/{account}/posts/'\n\n session = HTMLSession()\n session.headers.update({'Accept-Language': 'en-US,en;q=0.5'})\n\n response = session.get(url, timeout=timeout)\n html = response.html\n cursor_blob = html.html\n\n while True:\n for article in html.find('article'):\n yield _extract_post(article)\n\n pages -= 1\n if pages == 0:\n return\n\n cursor = _find_cursor(cursor_blob)\n next_url = f'{_base_url}{cursor}'\n\n if sleep:\n time.sleep(sleep)\n\n try:\n response = session.get(next_url, timeout=timeout)\n response.raise_for_status()\n data = json.loads(response.text.replace('for (;;);', '', 1))\n except (RequestException, ValueError):\n return\n\n for action in data['payload']['actions']:\n if action['cmd'] == 'replace':\n html = HTML(html=action['html'], url=_base_url)\n elif action['cmd'] == 'script':\n cursor_blob = action['code']", "def on_frontmatter_loaded(self, source_file, frontmatter):\n if not self._is_post(frontmatter):\n return\n self._validate_post(source_file, frontmatter)\n post = BlogPost(\n date=frontmatter['date'],\n source_file=source_file,\n summary=frontmatter.get('summary', ''),\n title=frontmatter['title'],\n route=self._resolver.as_route(source_file),\n url=self._resolver.as_url(source_file),\n posts=self.posts,\n )\n frontmatter['post'] = post\n if post != self.posts.get(source_file):\n self.posts[source_file] = post\n self._should_generate = True", "def 
feed2fields(file):\r\n import feedparser\r\n d = feedparser.parse(file)\r\n for entry in d.entries:\r\n date = (time.strftime(\"%Y-%m-%d %H:%M\", entry.updated_parsed)\r\n if hasattr(entry, \"updated_parsed\") else None)\r\n author = entry.author if hasattr(entry, \"author\") else None\r\n tags = [e['term'] for e in entry.tags] if hasattr(entry, \"tags\") else None\r\n\r\n slug = slugify(entry.title)\r\n kind = 'article'\r\n yield (entry.title, entry.description, slug, date, author, [], tags,\r\n kind, \"html\")", "def markdown(value):\n return Markup(md(value))", "def parse_posts(posts_dict):\n return posts_dict['posts']", "def each_comment_from_post(post):\n # first yield the post text body, if any\n if post['text']:\n yield post['text']\n # then yield each comment\n for comment in post['comments']:\n yield comment['text']", "def add_post(content):\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n cleaned = bleach.clean(content, strip = True)\n c.execute(\"insert into posts values(%s)\", (cleaned,))\n db.commit()\n db.close()", "def Translate(self, infile, outfile, mapfile):\r\n # Create the top-level feed object\r\n feed = []\r\n comments = []\r\n\r\n # Calculate the last updated time by inspecting all of the posts\r\n last_updated = 0\r\n\r\n # These three variables keep the state as we parse the file\r\n post_entry = {} # The current post atom.Entry to populate\r\n comment_entry = {} # The current comment atom.Entry to populate\r\n last_entry = None # The previous post atom.Entry if exists\r\n tag_name = None # The current name of multi-line values\r\n tag_contents = '' # The contents of multi-line values\r\n\r\n # Loop through the text lines looking for key/value pairs\r\n split_re = re.compile('^[A-Z ]+:')\r\n for line in infile:\r\n\r\n # Remove whitespace\r\n line = line.strip()\r\n\r\n # Check for the post ending token\r\n if line == '-' * 8 and tag_name != 'BODY':\r\n if post_entry:\r\n # Add the post to our feed\r\n sys.stderr.write(\"Adding post %s\\n\" % post_entry['title'])\r\n self.add_to_user_map(post_entry.get('author'), post_entry.get('authorEmail'))\r\n feed.insert(0, post_entry)\r\n last_entry = post_entry\r\n\r\n # Reset the state variables\r\n post_entry = {}\r\n comment_entry = {}\r\n tag_name = None\r\n tag_contents = ''\r\n continue\r\n\r\n # Check for the tag ending separator\r\n elif line == '-' * 5:\r\n # Get the contents of the body and set the entry contents\r\n if tag_name == 'BODY':\r\n post_entry['description'] = self._Encode(tag_contents)\r\n\r\n # This is the start of the COMMENT section. 
Create a new entry for\r\n # the comment and add a link to the original post.\r\n elif tag_name == 'COMMENT':\r\n comment_entry['body'] = self._Encode(tag_contents)\r\n post_entry.setdefault('comments', []).append(comment_entry)\r\n self.add_to_user_map(comment_entry.get('author'), comment_entry.get('authorEmail'))\r\n comment_entry = {}\r\n\r\n # Get the contents of the extended body\r\n elif tag_name == 'EXTENDED BODY':\r\n if post_entry:\r\n post_entry['mt_text_more'] = self._Encode(tag_contents)\r\n elif last_entry:\r\n last_entry['mt_text_more'] = self._Encode(tag_contents)\r\n\r\n # Convert any keywords (comma separated values) into Blogger labels\r\n elif tag_name == 'KEYWORDS':\r\n post_entry['mt_keywords'] = tag_contents\r\n\r\n # Reset the current tag and its contents\r\n tag_name = None\r\n tag_contents = ''\r\n continue\r\n\r\n # Split the line into key/value pairs\r\n key = line\r\n value = ''\r\n if split_re.match(line):\r\n elems = line.split(':')\r\n key = elems[0]\r\n if len(elems) > 1:\r\n value = ':'.join(elems[1:]).strip()\r\n\r\n # The author key indicates the start of a post as well as the author of\r\n # the post entry or comment\r\n if key == 'AUTHOR':\r\n # Create a new entry \r\n entry = {}\r\n\r\n # Add the author's name\r\n author_name = self._Encode(value)\r\n if not author_name:\r\n author_name = 'Anonymous'\r\n entry['author'] = author_name\r\n\r\n # Add the appropriate kind, either a post or a comment\r\n if tag_name == 'COMMENT':\r\n entry['postid'] = post_entry['postid']\r\n comment_entry = entry\r\n else:\r\n entry['postid'] = 'post-' + self._GetNextId()\r\n post_entry = entry\r\n\r\n # The title only applies to new posts\r\n elif key == 'TITLE' and tag_name != 'PING':\r\n post_entry['title'] = self._Encode(value)\r\n\r\n # If the status is a draft, mark it as so in the entry. 
If the status\r\n # is 'Published' there's nothing to do here\r\n elif key == 'STATUS':\r\n post_entry['status'] = value\r\n\r\n # Turn categories into labels\r\n elif key == 'CATEGORY':\r\n post_entry.setdefault('category', []).append(value)\r\n\r\n # Convert the date and specify it as the published/updated time\r\n elif key == 'DATE' and tag_name != 'PING':\r\n entry = post_entry\r\n if tag_name == 'COMMENT':\r\n entry = comment_entry\r\n entry['dateCreated'] = value\r\n\r\n # Check to see if this was the last post published (so far)\r\n # seconds = time.mktime(time_val)\r\n # last_updated = max(seconds, last_updated)\r\n\r\n # Convert all tags into Blogger labels\r\n elif key == 'TAGS':\r\n post_entry.setdefault('tags', []).append(value)\r\n\r\n # Update the author's email if it is present and not empty\r\n elif tag_name == 'COMMENT' and key == 'EMAIL':\r\n comment_entry['authorEmail'] = value\r\n\r\n # Update the author's URI if it is present and not empty\r\n elif tag_name == 'COMMENT' and key == 'URL':\r\n comment_entry['authorUrl'] = value\r\n\r\n # If any of these keys are used, they contain information beyond this key\r\n # on following lines\r\n elif key in ('COMMENT', 'BODY', 'EXTENDED BODY', 'EXCERPT', 'KEYWORDS', 'PING'):\r\n tag_name = key\r\n\r\n # These lines can be safely ignored\r\n elif key in ('BASENAME', 'ALLOW COMMENTS', 'CONVERT BREAKS', \r\n 'ALLOW PINGS', 'PRIMARY CATEGORY', 'IP', 'URL', 'EMAIL'):\r\n continue\r\n\r\n # If the line is empty and we're processing the body, add a line break\r\n elif (tag_name == 'BODY' or tag_name == 'EXTENDED BODY' or tag_name == 'COMMENT') and len(line) == 0:\r\n tag_contents += '\\n'\r\n\r\n # This would be a line of content beyond a key/value pair\r\n elif len(key) != 0:\r\n tag_contents += line + '\\n'\r\n\r\n\r\n # Update the feed with the last updated time\r\n # feed.updated = atom.Updated(self._ToBlogTime(time.gmtime(last_updated)))\r\n\r\n # Serialize the feed object\r\n yaml.dump(feed, outfile, Dumper=yaml.CDumper)\r\n \r\n # Write out the user map\r\n user_map_dict = {}\r\n for name, email in self.user_map:\r\n user_map_dict[name] = email\r\n yaml.dump(user_map_dict, mapfile, Dumper=yaml.CDumper)", "def process_markdown(input_markdown, output_name, latex_img_dir = \"./\", input_path = \"./\", thumb_size=64):\n\tmd = markdown.Markdown( extensions=[ 'meta'\n\t , 'codehilite'\n\t , 'tables'\n\t , 'def_list'\n\t , 'footnotes'\n\t , ResourceExtractor({ \"resource_dir\": output_name\n\t , \"relative_path\": input_path\n\t })\n\t , AbstractExtractor()\n\t , ToCExtractor()\n\t , MathJaxExtension()\n\t , LaTeX({ \"latex_img_dir\": latex_img_dir\n\t , \"input_path\": input_path\n\t })\n\t ]\n\t )\n\t\n\t# Basic HTML conversion\n\thtml = md.convert(input_markdown)\n\t\n\t# Generate table of contents\n\ttoc = md.toc\n\t\n\t# Choose document title (default to the output name)\n\ttitle = output_name\n\t# Use the first heading if possible\n\tif len(toc) > 0:\n\t\ttitle = toc[0][1]\n\t# Better yet, get the explicitly given metadata\n\ttitle = md.Meta.get(\"title\", [title])[0]\n\t\n\t# Choose document subtitle (only available from metadata)\n\tsubtitle = md.Meta.get(\"subtitle\", [None])[0]\n\t\n\t# Get the image from the metadata\n\timg = md.Meta.get(\"img\", [None])[0]\n\timg_alt = md.Meta.get(\"img_alt\", [title])[0]\n\t\n\t# The abstract should be taken to be the first paragraph.\n\tabstract = md.abstract if md.abstract is not None else \"\"\n\t\n\t# Get the list of tags\n\ttags = md.Meta.get(\"tags\", [])\n\t\n\t# Get the list of 
files to include\n\tincludes = md.Meta.get(\"include\", [])\n\t\n\t# Get the show option\n\tshow = md.Meta.get(\"show\", [\"True\"])[0] == \"True\"\n\t\n\tfiles = md.resources\n\t\n\t# Add the article image to the list of files and create a thumbnail if\n\t# possible.\n\tif img is not None and img.startswith(\"file://\"):\n\t\timg = os.path.join(input_path, img[len(\"file://\"):])\n\t\timg_output_name = \"%s/%s\"%(output_name,\n\t\t unique(os.path.basename(img),\n\t\t [f.split(\"/\")[-1] for (_,f) in files]))\n\t\t\n\t\timg_thumbnail = \"%s.thumb.png\"%img\n\t\t\n\t\tp = Popen( [\"convert\"\n\t\t , img\n\t\t , \"-thumbnail\", \"%dx%d\"%(thumb_size,thumb_size)\n\t\t , img_thumbnail]\n\t\t , stdin = None\n\t\t , stdout = sys.stderr\n\t\t , stderr = sys.stderr\n\t\t )\n\t\tif p.wait() != 0:\n\t\t\traise Exception(\"Creating img thumbnail failed.\")\n\t\t\n\t\tfiles.append((img_thumbnail, img_output_name))\n\t\timg = img_output_name\n\t\n\t# Generate meta-data\n\tmeta_data = {\n\t\t\"url\" : output_name,\n\t\t\"title\" : title,\n\t\t\"subtitle\" : subtitle,\n\t\t\"img\" : img,\n\t\t\"img_alt\" : img_alt,\n\t\t\"abstract\" : abstract,\n\t\t\"tags\" : tags,\n\t\t\"show\" : show,\n\t}\n\t\n\treturn html, toc, meta_data, files, includes", "def _convert_to_fancypants(self, markdown_text: str) -> dict: # noqa: ANN001\n text_data = {\"output_mode\": \"rtjson\", \"markdown_text\": markdown_text}\n return self._reddit.post(API_PATH[\"convert_rte_body\"], data=text_data)[\"output\"]", "def parse_frontmatter_and_strip(self):\n assert self._raw_content\n raw_content = self._raw_content\n\n if raw_content.startswith('---'):\n raw_content = raw_content[3:]\n\n tridash_re = re.compile('^-{3,5}\\s*$', re.MULTILINE)\n m = tridash_re.search(raw_content)\n if m:\n start, end = m.span()\n # start is the 1st dash index\n # end is the index of '\\n' in the same line\n self.frontmatter = raw_content[:start]\n self.md = raw_content[end+1:]\n else:\n self.frontmatter = None\n self.md = raw_content\n if self.frontmatter:\n # strings in fm is unicode or ascii depending on whether\n # the object is an ascii string or not\n fm = yaml.load(self.frontmatter)\n else:\n fm = {}\n self.set_tags(fm)\n self.set_title(fm)\n self.set_category(fm)", "def parse(url=None, html=None, text=None, title=None,\n sentences_count=5,\n options={},\n summarize_algo=\"luhn\",\n date_timezone=\"America/New_York\"):\n\n article = Article(\"\")\n\n if text and title:\n article.is_parsed = True\n article.is_downloaded = True\n article.set_title(title)\n article.set_text(text)\n else:\n if url:\n r = requests.get(url.strip())\n if r.status_code != 200:\n raise Exception(\"Paper request failed '%s'\" % url)\n html = r.content\n\n if html:\n soup = get_soup(html)\n else:\n raise Exception(\"Paper missing HTML content\")\n\n article.set_html(remove_social_embeds(html))\n article.parse()\n article.nlp()\n\n if options.get(\"title_selector\"):\n title = soup.select(options.get(\"title_selector\"))\n if title:\n title = title[0].text\n article.set_title(title)\n\n if options.get(\"image_selector\"):\n img = soup.select(options.get(\"image_selector\"))\n if img:\n img = img[0].text\n article.set_top_img_no_check(img)\n\n if options.get(\"content_selector\"):\n html = soup.select(options.get(\"content_selector\"))\n if html:\n article.set_text(html[0].text)\n\n summary = summarize(text=article.text,\n title=article.title,\n algo=summarize_algo,\n sentences_count=sentences_count)\n publish_date = article.publish_date\n if not publish_date and html:\n 
publish_date = extract_publish_date(html)\n if not publish_date:\n publish_date = datetime.datetime.now()\n\n return {\n \"url\": article.canonical_link,\n \"title\": article.title,\n \"summary\": summary,\n \"summaries\": summary.split(\"\\n\\n\"),\n \"text\": article.text,\n \"html\": article.html,\n \"top_image\": article.top_image,\n \"images\": article.images,\n \"videos\": list(set(article.movies + extract_video_iframes(html))),\n \"social_media_content\": extract_social_media_content(html),\n \"keywords\": article.keywords,\n \"tags\": article.tags,\n \"authors\": article.authors,\n \"published_date\": datetime_to_local_timezone(publish_date),\n \"md_text\": \"\"\n }", "def parse_container(self, containers: List[bs]):\n today = date.today()\n span_fields = {\"company\", \"location\", \"date\"}\n\n for container in containers:\n post_href = container.find(\"a\", {\"class\": \"jobtitle\"})[\"href\"]\n\n fields = {\n f: find_span(container, \"span\", f) for f in span_fields\n } # type: Dict[str, Any]\n\n fields.update(\n {\n \"date_posted\": parse_date(fields[\"date\"], today),\n \"title\": container.a.text,\n \"date_added_db\": today,\n \"description\": find_span(container, \"div\", class_=\"summary\"),\n \"source\": 1,\n \"link\": f\"https://indeed.com/{post_href}\",\n }\n )\n fields[\"is_sponsored\"] = bool(fields[\"date_posted\"])\n\n self.df = self.df.append(fields, ignore_index=True)", "def create_x_posts(x, subject):\n posts = []\n for counter, value in enumerate(\"abcdefghijklmnopqrstuvwxyz\"):\n posts.append(Post.create(subject=subject, title=(value*10), body=(value*100)))\n posts[counter].save()", "def on_page_markdown(self, markdown, page, config, files):\n listext = self.config['ext']\n src_file_path = page.file.abs_src_path\n prepath, ext = os.path.splitext(src_file_path)\n lang = ext.lstrip('.')\n filename = page.file.name\n if ext in listext:\n new_markdown = \"# {0}\\n\\n```{1}\\n\".format(filename, lang) + markdown + \"\\n```\"\n return new_markdown\n else:\n return markdown", "def markdown_cells(self):\n for cell in self.content.cells:\n if cell.cell_type == \"markdown\" and not cell.source.startswith(NOTEBOOK_HEADER_TAG) \\\n and not cell.source.startswith(NAVBAR_TAG):\n yield cell", "def insert_new_post(post_arg_set):\n api, post_data, acct_data, page_id, config = post_arg_set\n\n try:\n post_id = post_data['id'] if post_data.has_key('id') else None\n\n except Exception as e:\n log.error( e )\n\n else:\n\n # parse date\n if post_data.has_key('created_time') and post_data['created_time'] is not None: \n dt = datetime.strptime(post_data['created_time'], FB_DATE_FORMAT)\n date_time = tz_adj(dt, config)\n time_bucket = round_datetime(date_time, config)\n raw_timestamp = int(date_time.strftime(\"%s\"))\n \n else:\n time_bucket = None\n raw_timestamp = None\n \n # extract message so we can find links within the msg if not in url\n article_urls = [get_fb_link(post_data, config, unshorten=True)]\n message = post_data['message'].encode('utf-8') if post_data.has_key('message') else None\n message_urls = get_message_urls(article_urls, message, config)\n\n # detect article links, unshorten and parse\n article_urls = [\n parse_url(unshorten_link(url, config)) \\\n for url in article_urls + message_urls\n if url is not None\n ]\n\n article_urls = [url for url in article_urls if is_article(url, config)]\n\n if article_urls:\n for article_url in set(article_urls):\n\n # sluggify url\n article_slug = sluggify(article_url)\n\n # format data\n post_value = {\n 'article_slug': 
article_slug,\n 'article_url': article_url,\n 'time_bucket': time_bucket,\n 'fb_post_created': raw_timestamp,\n 'raw_timestamp': raw_timestamp,\n 'fb_raw_link' : get_fb_link(post_data, config=config),\n 'fb_page_id': page_id,\n 'fb_post_id': post_id,\n 'fb_page_likes': acct_data['likes'] if acct_data.has_key('likes') else None,\n 'fb_page_talking_about': acct_data['talking_about_count'] if acct_data.has_key('talking_about_count') else None,\n 'fb_type': post_data['type'] if post_data.has_key('type') else None,\n 'fb_status_type': post_data['status_type'] if post_data.has_key('status_type') else None,\n 'fb_message': message\n }\n \n # always insert insights data\n if is_insights(page_id, config):\n \n log.info( \"INSIGHTS\\tAdding data from %s re: %s\" % (page_id, article_slug) )\n\n # fetch data\n insights_value = get_insights_data(api, page_id, post_id)\n\n # create datasource name\n data_source = \"facebook_insights_%s\" % page_id \n \n # upsert url\n upsert_url(article_url, article_slug, data_source, config)\n\n # insert id\n db.sadd('facebook_post_ids', post_id)\n\n # format time bucket\n current_time_bucket = gen_time_bucket(config)\n insights_value['time_bucket'] = current_time_bucket\n post_value.pop('time_bucket', None)\n \n value = json.dumps({\n data_source : dict(post_value.items() + insights_value.items())\n })\n\n # upload data to redis\n db.zadd(article_slug, current_time_bucket, value) \n \n # only insert new posts\n if not db.sismember('facebook_post_ids', post_id):\n \n log.info( \"FACEBOOK\\tNew post %s\\t%s\" % (post_id, article_url) )\n \n # insert id\n db.sadd('facebook_post_ids', post_id) \n \n # upsert url\n data_source = \"facebook_%s\" % page_id\n upsert_url(article_url, article_slug, data_source, config)\n\n value = json.dumps( {data_source : post_value} )\n\n\n # upload data to redis\n db.zadd(article_slug, time_bucket, value)", "def get_rss_feed_markdown(feed_key, top_n=None):\n\n # Parse the rss feed\n feed = get_rss_feed(feed_key)\n\n # Create the object output\n feed_str = \"\"\n hr = \"\\n---\\n\" # Horizontal rule\n\n if top_n is None:\n top_n = len(feed.entries)\n elif not (isinstance(top_n, int) and top_n > 0):\n raise ValueError(f\"'top_n' must be an integer value greater than 0. 
Received: '{top_n}'\")\n\n feed_str += hr\n\n # Loop through entries\n for entry in feed.entries[:top_n]:\n feed_str += f\"{entry['summary']} ([Link]({entry['link']}))\\n\" + hr\n\n return feed_str", "def extract_features_from_args(markdown, args):\n if args.notebooks:\n markdown_l = []\n for notebook in args.notebooks:\n markdown_l.extend(generate_markdown_cells(\n load(notebook), args.pattern\n ))\n markdown += ''.join(markdown_l)\n\n if args.markdowns:\n for mark in args.markdowns:\n with open(mark, 'r') as fil:\n markdown += (\n args.pattern.format(mark)\n + fil.read()\n )\n\n blocks = split_markdown(markdown, args.pattern)\n for block in blocks:\n block['features'] = extract_features(block['code'])\n return blocks", "def process_entry(entry, blog, START):\n try:\n when = entry['updated_parsed']\n except KeyError:\n try:\n when = entry['published_parsed']\n except KeyError:\n return # Ignore undateable posts\n\n if when:\n when = pytz.timezone('UTC').localize(datetime.fromtimestamp(time.mktime(when)))\n else:\n # print blog, entry\n return\n\n if when < START:\n return\n\n title = entry.get('title', \"Null\")\n\n try:\n author = entry['author']\n except KeyError:\n try:\n author = ', '.join(a['name'] for a in entry.get('authors', []))\n except KeyError:\n author = 'Anonymous'\n\n link = entry['link']\n\n try:\n body = entry['content'][0]['value'].replace(\"h1>\",\"b>\").replace(\"h2>\",\"b>\")\n except KeyError:\n body = entry['summary']\n\n return Post(when, blog, title, author, link, body)", "def markdown_links(self):\n return self.findall_markdown_cells(MARKDOWN_LINK)", "def update_post_format(post):\n\n post_dict = {\n \"title\": post[1],\n \"genre\": get_genre(post[0]),\n \"content\": post[2],\n \"repeater_link\": get_links(post[3], post[4]),\n }\n \n return post_dict", "def markdown(self, text):\n\n # Remove rel attributes as they are not supported by html2markdown\n text = re.sub(r' rel=\".+?\">', \">\", text)\n\n # Convert html to markdown\n text = html2markdown.convert(text)\n\n # Decode [<>&] characters\n text = text.replace(\"&lt;\", \"<\").replace(\"&gt;\", \">\").replace(\"&amp;\", \"&\")\n\n # Wrap as Rich Markdown\n return Markdown(text)", "def extract_posts(posts_file, output_filename=direc+\"/posts.txt\"):\r\n if not os.path.exists(output_filename.split(\"/\")[0]):\r\n os.makedirs(output_filename.split(\"/\")[0])\r\n\r\n print(\"Extracting posts from \" + posts_file + \"...\")\r\n posts_dict = {}\r\n with open(output_filename, 'w', encoding=encoding) as f:\r\n current = 0\r\n for event, child in iterparse(posts_file, events=('start', 'end')):\r\n if current > SAMPLE_SIZE:\r\n break\r\n elif len(child.attrib) > 0 and event == \"start\":\r\n line = \"\"\r\n if child.attrib['PostTypeId'] == '1' and 'AcceptedAnswerId' in child.attrib:\r\n posts_dict[child.attrib['Id']] = {'accepted': child.attrib['AcceptedAnswerId'], 'other': []}\r\n clean_title = clean_markdown(child.attrib['Title'])\r\n clean_body = clean_markdown(child.attrib['Body'])\r\n line = child.attrib['Id'] + \"\\t\" + clean_title + \"\\t\" + clean_body + \"\\t\" + child.attrib['Score'] + \"\\n\"\r\n current += 1\r\n elif child.attrib['PostTypeId'] == '2':\r\n if child.attrib['ParentId'] in posts_dict and not child.attrib['Id'] == posts_dict[child.attrib['ParentId']]['accepted']:\r\n posts_dict[child.attrib['ParentId']]['other'].append(child.attrib['Id'])\r\n clean_body = clean_markdown(child.attrib['Body'])\r\n line = child.attrib['Id'] + \"\\t\" + child.attrib['ParentId'] + \"\\t\" + clean_body + \"\\t\" + 
child.attrib['Score'] + \"\\n\"\r\n current += 1\r\n f.write(line)\r\n print_progress(current, SAMPLE_SIZE)\r\n print(\"\\nFinished extracting posts from \" + output_filename + \".\\n\")\r\n return posts_dict", "def parse_gulde_news():\n # set url\n url = 'https://web-scraping-demo.zgulde.net/news' \n agent = 'codeup ds germain'\n # query\n response = requests.get(url, headers={'User-Agent': agent}) \n # soup\n soup = BeautifulSoup(response.text) \n # raw list of articles\n articles = soup.select('.grid.gap-y-12 > div') \n # list of dicts for dataframe\n article_list = [] \n # parse each article\n for article in articles: \n # grab title\n title = article.h2.text \n # grab date, author, contents of article\n date, author, contents = article.select('.py-3')[0]\\\n .find_all('p') \n # add dict of info to list\n article_list.append({'title':title, 'date':date.text,\n 'author':author.text, 'contents':contents.text}) \n # return dataframe\n return pd.DataFrame(article_list)", "def get_posts_from_soup(soup, debug=False, show_soup=False):\n\n # post content of each found post\n post_data_json = []\n\n # extract all blocks enclosed by search post bg (followed by a number, therefore using regex)\n search_posts_soup = soup.findAll(\n \"div\", {\"class\": re.compile(\"^(search post bg)\")})\n\n if debug == True:\n print(\"Num Search Posts:\", len(search_posts_soup))\n\n for search_post_soup in search_posts_soup:\n if show_soup == True:\n print(\"$$$$$$ NEW SEARCH POST SOUP $$$$$$$$\")\n print(search_post_soup)\n\n single_post_data = []\n if debug == True:\n print(\"\\n########NEW SEARCH POST###########\")\n\n # Extract Author\n # <dt class=\"author\">von <a class=\"username\" href=\"./memberlist.php?mode=viewprofile&amp;u=_user_id_&amp\n # ;sid=_sid_\">_author_</a></dt>\n\n # class is either username or username-coloured\n author_soup = search_post_soup.find(\n \"a\", {\"class\": re.compile(\"^(username)\")})\n author_info = [\"Author\", author_soup.text.strip(),\n author_soup.get('href').strip()]\n single_post_data.append(author_info)\n if debug == True:\n print(\" author:\", author_info)\n\n attributes_soup = search_post_soup.findAll('dd')\n if debug == True:\n print(\"--------ATTRIBUTES--------\")\n\n # convert attributes / special case for date\n for attribute_soup in attributes_soup:\n\n # Extract & Convert Date\n # <dd class=\"search-result-date\">Montag 15. April 2019, 19:55</dd>\n try:\n if attribute_soup[\"class\"][0] == \"search-result-date\":\n date_string = attribute_soup.text.strip()\n try:\n # Convert Dates of sort Sonntag 9. Juni 2019, 09:00\n date = datetime.strptime(\n date_string, '%A %d. 
%B %Y, %H:%M')\n except:\n date = datetime(1900, 1, 1)\n date_info = [\"Datum\", date, None]\n single_post_data.append(date_info)\n if debug == True:\n print(\" date_string:\", date_string,\n \" datetime:\", date_info)\n continue\n except:\n pass\n\n # Extract other attributes according to structure\n # <dd>_KEY_: <a href=\"./viewtopic.php?f=3&amp;t=_TOPIC_&amp;\n # hilit=_SEARCH_TERM_&amp;sid=_SID_\">_VALUE_</a></dd>\n # extract link\n link = attribute_soup.find('a')\n if link is not None:\n link = link.get('href').strip()\n attributes = attribute_soup.text.split(':')\n attributes = list(map(str.strip, attributes))\n\n if len(attributes) == 2:\n # convert attribute to int if applicable\n try:\n attributes[1] = int(attributes[1])\n except:\n attributes[1] = attributes[1].encode(\n 'iso-8859-1', 'ignore').decode('utf8', 'ignore')\n attributes.append(link)\n single_post_data.append(attributes)\n if debug == True:\n print(\" attribute:\", attributes)\n\n # extract text body / somehow the decoding doesn't work\n body_soup = search_post_soup.find(\"div\", {\"class\": \"postbody\"})\n body = body_soup.text\n body = body.replace('\\n', ' ').strip()\n\n text = [\"Text\", body, None]\n single_post_data.append(text)\n\n post_as_dict = [post_info_as_dict(attribute)\n for attribute in single_post_data]\n post_as_json = json.dumps(post_as_dict, indent=4, sort_keys=True, default=str,ensure_ascii=False)\n post_data_json.append(post_as_json) \n\n if debug == True:\n print(\"--------TEXT--------\")\n print(\" text:\", text)\n print(\"########## OUTPUT DICT ############\")\n print(post_as_dict)\n\n return post_data_json", "def preprocess_post(self, post):\n # tokenize, clean, & tag part-of-speech for all words\n if self.document_level == 'postwise':\n\n doc_text = all_comments_from_post(post)\n # leave early if there's nothing there\n if doc_text == '':\n return []\n\n tokens = nltk.word_tokenize(doc_text)\n # TODO: skip this if there's no POS filtering args!\n tagged = nltk.pos_tag(tokens)\n\n # filter out most invalid words with valid_word()\n processed_document = []\n for word, pos_tag in tagged:\n if self.valid_word(word, pos_tag):\n cleaned_word = self.clean_word(word)\n # things like digits and other junk become empty string,\n # so exclude them from final document\n if cleaned_word:\n processed_document.append(cleaned_word)\n # finally, update the post\n post['postwise'] = {'tokens': processed_document, 'text': doc_text}\n self.postman.posts_write.update_one({'_id':post['_id']}, {'$set':post}, upsert=True)\n else:\n raise NotImplementedError('document_level: \"%s\"' % self.document_level)\n\n return processed_document", "def articles(self):\n articles = Post.objects.live().descendant_of(self)\n articles = articles.order_by('-date')\n\n return articles", "def markdown_report(issues, commits):\n print()\n print('Handled issues:')\n print()\n\n for issue in issues:\n markdown_item(\n '#{0} {1}'.format(\n issue.number,\n issue.title,\n ),\n issue.html_url,\n )\n\n print()\n print('Commits:')\n print()\n\n for commit in commits:\n markdown_item(\n '{0} - {1}'.format(\n commit.sha[:7],\n commit.commit.message.split('\\n')[0]\n ),\n commit.html_url,\n )", "def recent_posts(self):\n\n try:\n jsondoc = json.load(urllib.urlopen(\"http://reddit.com/user/%s.json\" % self.username))\n except:\n raise self.DoesNotExist\n \n posts = []\n for item in jsondoc['data']['children']:\n if item['kind'] == 't1':\n posts.append(Comment(item['data']))\n elif item['kind'] == 't3':\n posts.append(item['data'])\n\n return 
posts", "def markdown(s):\n md = markdown_module.Markdown(MARKDOWN_EXTENSIONS, safe_mode='remove')\n return mark_safe(md.convert(s))", "def parse_content(content):\n\n post = {}\n list_recipes = []\n parsed_html = BeautifulSoup(content, 'html.parser')\n\n for div in parsed_html.find_all('div', attrs={'class': 'post'}):\n div_content = div.find(\n 'div',\n attrs={'class': 'fusion-post-content-container'}\n )\n post = {\n 'post_link': div.a.attrs['href'],\n 'post_title': div.h1.get_text(),\n 'post_content': div_content.get_text(),\n }\n list_recipes.append(post)\n\n return list_recipes", "def create_post(self: User, content: str, is_public: bool, circles: List[Circle], reshareable: bool,\n reshared_from: Optional[Post], media_list: List[Media], mentioned_users: List[User],\n is_update_avatar: bool) \\\n -> Union[Post, bool]:\n if not content and not media_list:\n # a post has to have either content or media\n return False\n\n new_post = Post()\n new_post.eid = make_uuid()\n new_post.author = self.id\n if content:\n new_post.content = bleach.clean(content)\n new_post.is_public = is_public\n new_post.circles = circles\n new_post.media_list = media_list\n new_post.is_update_avatar = is_update_avatar\n\n if reshared_from and not reshareable:\n # if resharing from a post, this post must also be reshareable, otherwise it's logically wrong\n return False\n\n if reshared_from:\n if media_list:\n # when resharing, only allow content (text), e.g. no media\n return False\n\n if reshared_from.reshared_from:\n # if reshared_from itself is a reshared post, reshare reshared_from's original post\n # reshared_from.reshared_from is LazyReference so need to retrieve the full post\n reshared_from = get_in_post_cache(reshared_from.reshared_from.id)\n\n # same explanation for context_home_or_profile=False\n if not sees_post(self, reshared_from, context_home_or_profile=False):\n return False\n\n if not reshared_from.reshareable:\n return False\n\n new_post.reshared_from = reshared_from\n\n new_post.reshareable = reshareable\n new_post.save()\n\n if reshared_from:\n create_notification(\n self,\n notifying_href=new_post.make_href(),\n notifying_summary=new_post.content,\n notifying_action=NotifyingAction.Reshare,\n notified_href=reshared_from.make_href(),\n notified_summary=reshared_from.content,\n owner=reshared_from.author\n )\n # only cache reshared post\n set_in_post_cache(reshared_from)\n\n mention(\n self,\n notified_href=new_post.make_href(),\n notified_summary=new_post.content,\n mentioned_users=mentioned_users\n )\n\n return new_post" ]
[ "0.6771389", "0.62583774", "0.59715056", "0.59394884", "0.59075785", "0.58579963", "0.5806396", "0.58032244", "0.57632285", "0.5762416", "0.575449", "0.5736609", "0.5716471", "0.5630635", "0.56303996", "0.55910885", "0.5574774", "0.55405563", "0.5535492", "0.55039394", "0.54988575", "0.54572403", "0.54377383", "0.5424497", "0.54201555", "0.5406719", "0.53972965", "0.5394924", "0.5387797", "0.53724754", "0.53600335", "0.53571635", "0.5346246", "0.53345865", "0.5332285", "0.53102374", "0.5299254", "0.5296881", "0.5284506", "0.52740026", "0.52509665", "0.5248356", "0.52453095", "0.524399", "0.5227462", "0.5225354", "0.518877", "0.51865435", "0.51857287", "0.5182739", "0.5180003", "0.51759213", "0.5165803", "0.51492286", "0.51427823", "0.50953996", "0.5092073", "0.5091725", "0.50911665", "0.50814784", "0.50542814", "0.50516486", "0.5040629", "0.5022762", "0.501704", "0.5010743", "0.5006939", "0.5000048", "0.49950716", "0.49809393", "0.49788746", "0.49782053", "0.49725455", "0.49632952", "0.49621478", "0.49618363", "0.49566904", "0.49480003", "0.49439752", "0.49334136", "0.49301046", "0.4926827", "0.49252987", "0.49096888", "0.4908166", "0.49079403", "0.49048257", "0.49046794", "0.49018636", "0.48963535", "0.4894877", "0.4894122", "0.48908907", "0.4879153", "0.4878199", "0.4876345", "0.4874374", "0.4869113", "0.4866565", "0.48632228" ]
0.7360025
0
Purge root dir from irrelevant html files
def purge_htmlfiles(args, posts):
    htmlist = list_of_htmlfiles(args, posts)
    html_to_remove = list()
    for fullname in glob.glob(os.path.join(args.root, '*.htm*')):
        if fullname not in htmlist:
            html_to_remove.append(fullname)

    if len(html_to_remove) > args.thumbnails.threshold_htmlfiles:
        inpt = 'x'
        while inpt not in 'yn':
            inpt = input(f'{len(html_to_remove)} html files to remove. Continue [y|n]? ').lower()
        if inpt == 'n':
            return

    for name in html_to_remove:
        print('Removing html files', name)
        os.remove(name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean(ctx):\n ctx.run(\"rm -rf build/html\")", "def cleanup():\n download_dir = settings.DOWNLOAD_BASE_DIR\n\n for base, dirs, files in os.walk(download_dir):\n for dir in dirs:\n shutil.rmtree(download_dir + dir)", "def _cleanup_files(self):\n\n for root, dirs, files in os.walk(self.build_directory):\n dirs_to_delete = [\n Path(root).joinpath(x) for x in dirs if x == '__pycache__'\n ]\n files_to_delete = [\n Path(root).joinpath(x) for x in files if Path(x).suffix == '.pyc'\n ]\n for d in dirs_to_delete:\n logger.info('Deleting: %s', d)\n shutil.rmtree(d)\n for f in files_to_delete:\n logger.info('Deleting: %s', f)\n f.unlink()", "def html_clean(options):\r\n remake_directories(options.sphinx.doctrees, options.html.outdir)\r\n html(options)\r\n return", "def clean():\n for dirpath, dirnames, filenames in os.walk('.'):\n for filename in filenames:\n if filename.endswith('.pyc') or filename.endswith('.pyo'):\n full_pathname = os.path.join(dirpath, filename)\n click.echo('Removing {}'.format(full_pathname))\n os.remove(full_pathname)", "def clean(self):\n if os.path.exists(self.paths['build_dir']):\n shutil.rmtree(self.paths['build_dir'])\n if os.path.exists(os.path.join(self.base_dir, 'docs')):\n shutil.rmtree(os.path.join(self.base_dir, 'docs'))", "def tearDown(self):\n for root, dirs, files in os.walk(TEMPDIR, topdown=False):\n for name in files:\n os.remove(os.path.join(root, name))\n for name in dirs:\n os.rmdir(os.path.join(root, name))\n os.rmdir(root)", "def clean():\n clean_files()", "def purge_files(self):\n run_keyword(\"Purge Server Configuration\")\n run_keyword(\"Purge Cache Manager Configuration\")\n # TODO: Probably the only sane way to do this is to call\n # a helper script which runs as root.\n # run_keyword(\"Purge Cache\")\n valid = r'/vicep([a-z]|[a-h][a-z]|i[a-v])$'\n for vicep in glob.glob(\"/vicep*\"):\n if re.match(valid, vicep) and os.path.isdir(vicep):\n run_keyword(\"Purge Directory\", \"%s/AFSIDat\" % vicep)\n run_keyword(\"Purge Directory\", \"%s/Lock\" % vicep)\n for vheader in glob.glob(\"%s/V*.vol\" % vicep):\n run_keyword(\"Sudo\", \"rm -f %s\" % vheader)", "def purge_cache():\n for (dir_path, dir_names, file_names) in os.walk(CACHE, topdown=False):\n for file_name in file_names:\n if is_json_file(file_name):\n path = os.path.join(dir_path, file_name)\n print(\"Removing file “%s”\" % path)\n os.remove(path)\n for directory in dir_names:\n path = os.path.join(dir_path, directory)\n if not os.listdir(path):\n print(\"Removing directory “%s”\" % path)\n os.rmdir(path)", "def cleanDirecs(rootDir):\n for root, dirs, files in os.walk(rootDir, topdown=False):\n \n if not files:\n if not dirs:\n print(\"Removing {0}\".format(root))\n os.rmdir(os.path.join(rootDir, root))", "def clear_data():\n dir_list = [\"generated/*\", \"pub/static/*\", \"var/cache/*\", \"var/page_cache/*\", \"var/view_preprocessed/*\", \"var/tmp/*\"]\n\n for item in dir_list:\n print(\"[ - ] Removing\", item, \"\\n\")\n subprocess.run([\"rm\", \"-rf\", item])", "def cleanup_intermediate_files():\n\n dirs = (DIR_PAGE, DIR_SRGB, DIR_VTI, DIR_TIFF, DIR_BACK, DIR_TEXT)\n map(lambda dir: shutil.rmtree(os.path.join(cwd, dir)) , dirs)", "def clean_home_subdir(self):\n\n self.log.debug(\"Cleaning up %s...\" % self.home_subdir_local)\n try:\n for tree in os.listdir(self.home_subdir_local):\n self.log.debug(\"... 
removing %s subtree\" % tree)\n path = os.path.join(self.home_subdir_local, tree)\n if os.path.isfile(path):\n os.remove(path)\n else:\n shutil.rmtree(path)\n except OSError, err:\n self.log.error(\"Cleaning up intel dir %s failed: %s\" % (self.home_subdir_local, err))", "def _cleanPackageDir(self, *_):\r\n for _, path in self._pkgDir:\r\n os.rmdir(os.path.join(self._rootfs, path))\r\n\r\n assert len(self._containers) == 0", "def clean():\n print(\"\\nCleaning the site from {}\\n\".format(_site_dest))\n rm(_site_dest)", "def _clean_bins():\n rmtree(LIBS_DIR)\n rmtree(BINS_DIR)\n rmtree(HEADERS_DIR)", "def purge():\n all_hashes = read_all()\n used_hashes = read_used()\n\n for kind, hashes in used_hashes.items():\n to_remove = all_hashes[kind].difference(hashes)\n if kind == 'evs':\n delete_from_directory_by_hashes(EV_DIRECTORY, to_remove)\n elif kind == 'cache':\n delete_from_directory_by_hashes(CACHE_DIRECTORY, to_remove)\n elif kind == 'seeds':\n delete_from_directory_by_hashes(SEED_DIRECTORY, to_remove)\n\n reset_used()", "def clean_build(c):\n c.run(\"rm -fr build/\")\n c.run(\"rm -fr dist/\")\n c.run(\"rm -fr xmlstarlet/config.h \" \"xmlstarlet/Makefile \" \"xmlstarlet/config.status\")\n c.run(\"rm -fr .eggs/\")\n c.run(\"find . -name '*.egg-info' -exec rm -fr {} +\")\n c.run(\"find . -name '*.egg' -exec rm -f {} +\")", "def clean(self):\n original_dir = os.getcwd()\n os.chdir(self.output)\n\n # Clear out directory\n file_list = os.listdir(self.output)\n\n for afile in file_list:\n if not afile.endswith('.gitignore'):\n path = os.path.join(self.output, afile)\n if os.path.isdir(path):\n rmtree(path)\n else:\n os.remove(path)\n os.chdir(original_dir)", "def clean_build(context):\n context.run(\"rm -fr build/\")\n context.run(\"rm -fr dist/\")\n context.run(\"rm -fr .eggs/\")\n context.run(\"find . -name '*.egg-info' -exec rm -fr {} +\")\n context.run(\"find . 
-name '*.egg' -exec rm -f {} +\")", "def clean(self) -> None:\n # remove all *.py and *.pyi files in the folder\n for wc in [\"*.py\", \"*.pyi\", \"modules.json\"]:\n for f in (self.package_path).rglob(wc):\n f.unlink()", "def clean_data():\n for clean_file in clean_files:\n file_list = [f for f in os.listdir(\".\") if f.endswith(clean_file)]\n for f in file_list:\n os.remove(f)", "def clean(allimages, alldirs):\n\n for img in allimages:\n # Delete HTML files\n htmlfn = join(opts.root, img._dir._path, img._pagefn)\n if exists(htmlfn):\n if opts.verbose:\n print \"Deleting\", htmlfn\n try:\n os.unlink(htmlfn)\n except:\n print >> sys.stderr, \"Error: deleting\", htmlfn\n\n # Delete thumbnails\n if img._thumbfn:\n thumbfn = join(opts.root, img._thumbfn)\n if exists(thumbfn):\n if opts.verbose:\n print \"Deleting\", thumbfn\n try:\n os.unlink(thumbfn)\n img._thumbfn = None\n except:\n print >> sys.stderr, \"Error: deleting\", thumbfn\n\n for d in alldirs:\n files = dircache.listdir(join(opts.root, d._path))\n\n # Delete HTML files in directories\n for f in files:\n fn = join(opts.root, d._path, f)\n if f in [ dirindex_fn, allindex_fn, allcidx_fn,\n sortindex_fn, css_fn ] or \\\n f.startswith('trackindex-'):\n if opts.verbose:\n print \"Deleting\", fn\n try:\n os.unlink(fn)\n pass\n except:\n print >> sys.stderr, \"Error: deleting\", fn\n\n if f == index_fn and islink(fn):\n os.unlink(fn)", "def cleanup(self):\n if os.path.exists(self.tgzfile):\n os.remove(self.tgzfile)\n\n if os.path.exists(self.dirname):\n shutil.rmtree(self.dirname)", "def clean():\n for root, dirs, files in os.walk('.'):\n for item in dirs:\n if (item[0]!='.'):\n try:\n os.remove(os.path.join(item,'.DS_Store'))\n except:\n pass", "def clean():\n for f in [f for f in os.listdir() if f.endswith(\".part\")]:\n os.remove(f)", "def clean(session):\n clean_dirs = (\n get_path(\".cache\"),\n get_path(\".coverage\"),\n get_path(\".pytest_cache\"),\n get_path(\"__pycache__\"),\n get_path(\"build\"),\n get_path(\"dist\"),\n get_path(\"docs\", \"__pycache__\"),\n get_path(\"docs\", \"build\"),\n get_path(\"scripts\", \"macos\", \"__pycache__\"),\n get_path(\"src\", \"python\", \"bezier.egg-info\"),\n get_path(\"src\", \"python\", \"bezier\", \"__pycache__\"),\n get_path(\"tests\", \"__pycache__\"),\n get_path(\"tests\", \"functional\", \"__pycache__\"),\n get_path(\"tests\", \"unit\", \"__pycache__\"),\n get_path(\"tests\", \"unit\", \"hazmat\", \"__pycache__\"),\n get_path(\"wheelhouse\"),\n )\n clean_globs = (\n get_path(\".coverage\"),\n get_path(\"*.mod\"),\n get_path(\"*.pyc\"),\n get_path(\"docs\", \"abi\", \"example\"),\n get_path(\"src\", \"python\", \"bezier\", \"*.pyc\"),\n get_path(\"src\", \"python\", \"bezier\", \"*.pyd\"),\n get_path(\"src\", \"python\", \"bezier\", \"*.so\"),\n get_path(\"src\", \"fortran\", \"*.o\"),\n get_path(\"tests\", \"*.pyc\"),\n get_path(\"tests\", \"functional\", \"*.pyc\"),\n get_path(\"tests\", \"unit\", \"*.pyc\"),\n )\n for dir_path in clean_dirs:\n session.run(shutil.rmtree, dir_path, ignore_errors=True)\n for glob_path in clean_globs:\n for filename in glob.glob(glob_path):\n session.run(os.remove, filename)", "def clean():\n sudo(\"rm -rf %(admin_webroot)s\" % env)", "def clean(ctx):\n ctx.run('rm -rf {dir}'.format(dir=ctx.build_dir.debian_dir))\n ctx.run('rm -rf {dir}'.format(dir=ctx.build_dir.dist_dir))\n ctx.run('rm -rf {dir}'.format(dir=DEBIAN_DPKG_DIR))", "def _cleanup(self):\n os.system(\"rm -r %s/*\" %(self._snippet_index_dir))\n os.system(\"rm %s/*\" %(self._para_dir))\n 
os.system(\"rm %s/*\" %(self._temp_dir))\n os.system(\"rm %s/*\" %(self._snippet_result_dir))", "def clean(ctx):\n logger = logging.getLogger(__name__)\n\n root_project_dir = discover_conf_py_directory(ctx.obj[\"root_project_dir\"])\n dirnames = [\"py-api\", \"_build\", \"modules\", \"packages\", \"_doxygen\"]\n dirnames = [\n os.path.join(root_project_dir, dirname) for dirname in dirnames\n ]\n for dirname in dirnames:\n if os.path.isdir(dirname):\n shutil.rmtree(dirname)\n logger.debug(\"Cleaned up %r\", dirname)\n else:\n logger.debug(\"Did not clean up %r (missing)\", dirname)", "def clean():\n possible_outputs = (\n '{}.html'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.epub'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.pdf'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.docx'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.odt'.format(CONFIG['FULL_PROJECT_NAME']),\n )\n\n for filename in possible_outputs:\n if os.path.exists(filename):\n os.remove(filename)\n print(\"Removed {}\".format(filename))", "def cleanup(self):\n\n # check if the directory exists\n if not os.path.exists(self.path):\n return\n\n # check if the directory is a directory\n if not os.path.isdir(self.path):\n return\n\n # loop over content of directory and remove it\n for the_file in os.listdir(self.path):\n file_path = os.path.join(self.path, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n pass", "def scrub():\n\n\tlocal(\"rm -fr dist build\")\n\tlocal(\"find . -name \\\"*.pyc\\\" -exec rm '{}' ';'\")", "def cleanup() -> None:\n\n for fname in glob(os.path.join(tdir, 'alexandria.*')):\n if os.path.splitext(fname)[1] not in {'.c', '.h'}:\n os.unlink(fname)", "def clean_directory():\n print(\"INFO: Cleaning old files...\")\n if os.path.exists(os.path.join(os.path.dirname(__file__), 'Scripts')):\n try:\n shutil.rmtree(os.path.join(os.path.dirname(__file__), 'Scripts'))\n except OSError as error:\n print(\"Error: %s - %s.\" % (error.filename, error.strerror))", "def cleanup(self):\n\n if self.debug:\n print 'Running cleanup()'\n print 'Starting removing dead links'\n\n for root, dirs, files in os.walk(self.tags_folder):\n if files:\n for f in files:\n try:\n full_path = os.path.join(root, f)\n if not os.path.exists(os.readlink(full_path)):\n os.unlink(full_path)\n if self.debug:\n print 'Removing dead link %s' % full_path\n except OSError:\n pass\n\n if self.debug:\n print 'Starting removing empty directories'\n self._del_empty_dirs(self.tags_folder)", "def clearRunDirectory(self):\n for root, dirs, files in os.walk(self.run_dir, topdown=False):\n for name in files:\n if name.lower().endswith(('.cps', '.txt', '.sbml', '.csv')):\n os.remove(os.path.join(root, name))\n for name in dirs:\n if len(os.listdir(os.path.join(root, name)))==0:\n os.rmdir(os.path.join(root, name))", "def html():\n builtdocs = path(\"docs\") / options.sphinx.builddir / \"html\"\n destdir = path(PACKAGE) / \"docs\"\n destdir.rmtree()\n builtdocs.move(destdir)", "def delete_tempdirs(self):\n\n if self._noWebDir:\n shutil.rmtree(self.webTopDir)", "def purge_downloaded_files():\n for fpath in DOWNLOADED_FILEPATHS:\n if os.path.exists(fpath):\n os.remove(fpath)", "def _clean_workdir(self):\n\t\ttoremove = [self._get_config_filepath(), self._get_params_filepath(), self._get_conv_filepath(), self._get_psf_filepath()]\n\t\tfor filepath in toremove:\n\t\t\tif os.path.exists(filepath):\t\n\t\t\t\tlogger.debug(\"Removing existing file %s...\" % 
(filepath))\n\t\t\t\tos.remove(filepath)", "def clean_filesystem(files=[]):\n remove_files(files + find_cache_files())", "def __del__(self):\n shutil.rmtree(self.epub_dir)", "def devclean():\n click.echo(\"start clean your output folder...\")\n rm(OUTPUTDIR, recursive=True)", "def deleteIndexFileIfExists(self):\n try:\n os.remove(self.dir+'/index.html')\n except OSError:\n pass", "def clear_debug_files(root_path_):\n\n ext_file = [\n \".sdf\",\n \".VC.db\",\n \".idb\",\n \".exp\",\n \".aps\",\n \".pdb\",\n \".obj\",\n \".res\",\n \".log\",\n \".tlog\",\n \".manifest\",\n \".lastbuildstate\",\n \".pch\",\n \".ipch\",\n \".cache\",\n \".ilk\",\n \".ipdb\",\n \".iobj\",\n \".aps\",\n ]\n\n ext_dir = [\n \"ipch\",\n\n ]\n if os.path.exists(root_path_):\n for root, dirs, files in os.walk(root_path_, topdown=True):\n for file in files:\n filename = os.path.join(root, file)\n delete_file(filename, ext_file)\n \n for dir in dirs:\n dir_path = os.path.join(root, dir)\n if dir.lower() in ext_dir:\n print(dir_path);\n shutil.rmtree(dir_path)\n\n for a_dir in ext_dir:\n path = os.path.join(root_path_, a_dir)\n if os.path.exists(path):\n shutil.rmtree(path)", "def tearDown(self):\n for fn in self.tempImages:\n os.remove(os.path.join(self.root, fn))\n os.rmdir(self.root)", "def clean():\n if os.path.exists('_build'):\n shutil.rmtree('_build')", "def cleanUp(self):\n import evoware.fileutil as F\n F.tryRemove(self.f_project, verbose=(self.VERBOSITY>1), tree=1)", "def clean():\n folders = ['utils_dfn/temp', 'utils_dfn/img', 'utils_dfn/mask', 'utils_dfn/output']\n for folder in folders:\n for item in os.listdir(folder):\n item_path = os.path.join(folder, item)\n if os.path.isdir(item_path):\n shutil.rmtree(item_path)\n elif os.path.isfile(item_path):\n os.remove(item_path)", "def clean(ctx):\n header(clean.__doc__)\n with ctx.cd(ROOT):\n for pattern in CLEAN_PATTERNS:\n info(\"Removing {0}\", pattern)\n ctx.run(\"rm -rf {0}\".format(pattern))", "def cleanDataDir(self):\n for strFile in os.listdir(self.strDataDir):\n os.remove(os.path.join(self.strDataDir, strFile))", "def _clean_up_optimization():\n for (root, dirs, files) in walk(TEMP_MODULES_DIR_PATH, topdown=False):\n for file in files:\n if file.startswith(\"__temp_\"):\n remove(f\"{root}/{file}\")\n try:\n rmdir(root)\n except OSError:\n G.warn_(f\"Unidentified file found in temporary directory: {root}\")", "def removeTmpDirs():\n p = Path(\".\")\n eggDirs = [x for x in p.glob(\"*.egg-info\") if x.is_dir()]\n eggDirs.append(Path(\"build\"))\n\n for d in eggDirs:\n if d.is_dir():\n shutil.rmtree(d)", "def _clean(base_dir):\n # remove the snakemake cache\n shutil.rmtree(os.path.join(base_dir, \".snakemake\"), ignore_errors=True)\n\n # remove seq2science caches\n shutil.rmtree(os.path.expanduser(os.path.join(xdg.XDG_CACHE_HOME, \"seq2science\")), ignore_errors=True)\n\n # remove historic seq2science cache location\n shutil.rmtree(os.path.expanduser(f\"~/.config/seq2science/\"), ignore_errors=True)\n\n print(\"All cleaned up!\")", "def clear_local_output_directory():\n output_path = '../output/*'\n files = glob.glob(output_path)\n for single_file in files:\n os.remove(single_file)", "def clean_retrosheet_files(self):\n # Get zipped and unzipped folder names\n zippedFileFolder = Filepath.get_retrosheet_folder(folder='zipped')\n unzippedFileFolder = Filepath.get_retrosheet_folder(folder='unzipped')\n\n # Clean out all files in both folders\n for folder in (zippedFileFolder, unzippedFileFolder):\n os.chdir(folder)\n for file in os.listdir(os.getcwd()): \n if 
os.path.isdir(file): \n shutil.rmtree(file)\n else: \n os.remove(file)", "def clean_docs(c):\n c.run(f\"rm -fr {DOCS_BUILD_DIR}\")", "def tearDown(self):\n # unittest.TestCase.tearDown(self)\n\n root = os.path.join(\".\", \"files\")\n endingList = os.listdir(root)\n rmList = [fn for fn in endingList if fn not in self.startingList]\n\n if self.oldRoot == root:\n for fn in rmList:\n fnFullPath = os.path.join(root, fn)\n if os.path.isdir(fnFullPath):\n os.rmdir(fnFullPath)\n else:\n os.remove(fnFullPath)\n\n os.chdir(self.oldRoot)", "def cleancosmeticdir(rootdir):\n for subdir in [t120.t120_ofst_dir,t120.t120_flat_dir,t120.t120_dark_dir]:\n tocleandir = rootdir + t120.t120_ofst_dir\n t120.log.info('tocleandir: '+tocleandir)\n pattern = tocleandir+'master*.fit*'\n tocleanlist = glob.glob(pattern)\n if len(tocleanlist):\n for tocleanfile in tocleanlist:\n t120.log.info('Now removing file: '+tocleanfile)\n os.remove(tocleanfile)\n return", "def _remove_files_dirs(self):\n if self.remove_remote_files_dirs:\n self._remove_remote_files_dirs()", "def tearDown(self) -> None:\n filtered = [f for f in glob.glob('steps/tests/test_output/*') if not re.match(r'\\.keep', f)]\n for file in filtered:\n try:\n if Path(file).is_dir():\n shutil.rmtree(file)\n else:\n os.remove(file)\n except PermissionError as pe:\n # We don't necessarily care that much\n continue", "def clean(self):\n if self.options.format != 'svg':\n for svgfile in self.svgouts.itervalues():\n os.remove(svgfile)\n os.rmdir(self.tmpdir)", "def cleanup(destination_subdir):\n sp.check_call(f\"rm {destination_subdir}/*.bam\", shell=True)\n sp.check_call(f\"rm {destination_subdir}/*.sam\", shell=True)\n sp.check_call(f\"rm -rf ./index_files\", shell=True)", "def __remove_base_directory__():\n p = subprocess.Popen('rm -rf {}/.wcscanner'.format(context.__BASE_PATH__), shell=True)\n p.wait()", "def cleanup_files(self):\n\n self.backup_files()\n self.delete_files()", "def clean_target_pages_dir():\n if not os.path.exists(target_pages_dir):\n os.makedirs(target_pages_dir)\n else:\n shutil.rmtree(target_pages_dir)\n os.mkdir(target_pages_dir)", "def clean_cwd():\n\n # Generator of the files generated for each runs\n del_files = (file for file in os.listdir() if file.endswith('.vtk')\n or file.endswith('.dat')\n or file.startswith('eeldata')\n or file.endswith('.log'))\n\n for file in del_files:\n try:\n os.remove(file)\n print(\"\\rRemoved {:s} succesfully!\".format(file), end=' '*15)\n except:\n print(\"\\rFailed to remove {:s}\".format(file))\n raise\n\n print('')", "def purge(root=os.path.join(\"~\", \".daisykit\", \"assets\")):\n root = os.path.expanduser(root)\n files = os.listdir(root)\n for f in files:\n if f.endswith(\".params\"):\n os.remove(os.path.join(root, f))", "def removeEmpties(self,name):\n empties = set()\n projectDir = dirs['installers'].join(name)\n for asDir,sDirs,sFiles in os.walk(projectDir.s):\n if not (sDirs or sFiles): empties.add(GPath(asDir))\n for empty in empties: empty.removedirs()\n projectDir.makedirs() #--In case it just got wiped out.", "def clean_up_temp_dir():\n files = glob.glob(f'{CONFIG_DIR}/tmp/*')\n for f in files:\n try:\n os.remove(f)\n except Exception:\n pass", "def tearDown(self):\n if self.rootdir and os.path.exists(self.rootdir):\n shutil.rmtree(self.rootdir)", "def clear(self, exclude=None):\n exclude = exclude or []\n for root, dirs, files in os.walk(self.config.output_dir):\n for f in files:\n if f not in exclude:\n os.unlink(os.path.join(root, f))\n for d in dirs:\n if d not in exclude:\n 
shutil.rmtree(os.path.join(root, d))", "def __del__(self):\n shutil.rmtree('tmp')\n self.quit_browser()", "def clean(self) -> None:\n if self.out_dir.exists():\n shutil.rmtree(self.out_dir)", "def remove(self): \n self.doRoot(self.removeDir)\n settings.getChanged('mosh.resourceReplacer.applied').remove(self.file)", "def delete_files_for_package(self, package):\n files = self.find_files_for_package(package, absolute_path=True)\n if not files:\n return\n path = os.path.dirname(files[0])\n for file in files:\n if os.path.exists(file):\n log.debug(\"Removing file '%s'\" % (file))\n os.unlink(file)\n if os.path.isdir(path) and os.listdir(path) == []:\n log.debug(\"Remove empty package repository '%s'\" % (path))\n os.rmdir(path)", "def clean_output_folder(output_folder):\n for root, dirs, files in os.walk(output_folder):\n for f in files:\n os.unlink(os.path.join(root, f))\n for d in dirs:\n shutil.rmtree(os.path.join(root, d))", "def cleanup(self):\n files = self.nlst()\n latest = self.latest_filename\n for filename in files:\n if filename != latest:\n result = self.delete(filename)\n logger.info(f\"Deleted old export from FTP: {result}\")", "def remove(args):\n files = []\n for path in args.files:\n if os.path.isdir(path):\n ft = filetree(path)\n files.extend(ft.filelist())\n else:\n files.append(path)\n for path in files:\n relpath = os.path.normpath(os.path.relpath(path, args.base))\n if relpath in args.cache:\n del args.cache[args.cache.index(relpath)]\n if args.delete and os.path.exists(path):\n os.remove(path)\n args.update = True\n return", "def clean(self):\n print(\"Cleaning outputs in %s\" % self.args.output)\n files = glob.glob(self.args.output + \"*.pkl\")\n for f in files:\n if os.path.exists(f):\n os.remove(f)", "def compress_html():\n if not os.path.exists(PATH_HTML):\n os.makedirs(PATH_HTML)\n os.chdir(PATH_HTML)\n try:\n zip_csv = ZipFile(SITE_NAME + '_' + DATE + '_html.zip', 'a')\n for file in glob.glob(\"*\" + DATE + \"*\" + \"html\"):\n zip_csv.write(file)\n os.remove(file)\n log.info(\"Compressing HTML files\")\n except Exception as e:\n log.error('Error when compressing html')\n log.info(type(e).__name__ + str(e))\n os.chdir(PROJECT_PATH)", "def cleanup():\n if len(env.releases) > 3:\n directories = env.releases\n directories.reverse()\n del directories[:3]\n env.directories = ' '.join([ '%(releases_path)s/%(release)s' % { 'releases_path':env.releases_path, 'release':release } for release in directories ])\n run('rm -rf %(directories)s' % env)", "def clear(self, cacheDir):", "def cleanUp(self):\r\n remove_files(self._db_files_to_remove, error_on_missing=False)", "def _delete_data (self, path):\n head, tail = os.path.split(path)\n for subdir, dirs, files in os.walk(head):\n for file in files:\n if tail in file:\n os.remove(os.path.join(subdir, file))", "def clean(ctx, so=False, cache=False):\n for name in ctx.shell.files('.', '.coverage*', recursive=False):\n ctx.shell.rm(name)\n for name in ctx.shell.files('bench', '.out.*', recursive=False):\n ctx.shell.rm(name)\n ctx.shell.rm_rf(\n 'docs/coverage',\n 'docs/gcov',\n 'build',\n 'dist',\n 'wheel/dist',\n ctx.doc.userdoc,\n 'docs/_userdoc/_build',\n ctx.doc.website.source,\n ctx.doc.website.target,\n )\n if cache:\n cacheclean(ctx)\n if so:\n soclean(ctx)", "def clean_files(self):\n self.filenames.clear()", "def tearDown(self):\n rmtree(getcwd(), ignore_errors=True)", "def tearDown(self):\n rmtree(getcwd(), ignore_errors=True)", "def clean_all_folder():\n LOGGER.warning('removal of old files has been temporarily 
disabled')\n # paths_to_clean = CFG.remove_files\n # if paths_to_clean: # pylint: disable=using-constant-test\n # for remove_config in paths_to_clean: # pylint: disable=not-an-iterable\n # name = tuple(remove_config.keys())[0]\n # LOGGER.info(f'processing: {name}')\n # remove_config = remove_config[name]\n # if 'folder' not in remove_config.keys():\n # LOGGER.error(f'missing \"folder\" in {name}')\n # return\n # if 'age' not in remove_config.keys():\n # LOGGER.error(f'missing \"age\" in {name}')\n # return\n # if not os.path.exists(remove_config['folder']):\n # LOGGER.error(f'path does not exist: {remove_config[\"folder\"]}')\n # return\n # _remove_old_files_from_folder(**remove_config)\n # else:\n # LOGGER.debug('no folder to clean')", "def remover_files():\n directory = os.getcwd()\n for file_name in glob.glob((\"{}/tmp/*\").format(directory)):\n remove(file_name)", "def reset():\n local('cd {{ project_name }} && \\\n rm -rf static && rm -rf gzip && rm -rf build')", "def cleanup_step(self):\n self.clean_home_subdir()\n\n super(IntelBase, self).cleanup_step()", "def remove_dir_content(path):\n for item in os.listdir(path):\n p = os.path.join(path, item)\n if os.path.isdir(p):\n shutil.rmtree(p)\n else:\n os.unlink(p)", "def delete_previous_files(schema_name, path_template, path_static):\n list_file_static = listdir(path_static)\n list_file_template = listdir(path_template)\n if schema_name in list_file_static:\n tree_path = path.join(path_static, schema_name)\n rmtree(tree_path, ignore_errors=True)\n html_file_name = \"wrap_\" + schema_name + \".html\"\n if html_file_name in list_file_template:\n html_file_path = path.join(path_template, html_file_name)\n remove(html_file_path)", "def clean_local():\n local('rm -fr build')\n local('mkdir -p build')", "def cleanup(self):\n\n print \"Cleaning up...\",\n sys.stdout.flush()\n\n builddir = os.path.join(self.build)\n\n comm = 'rm -rf '+builddir\n #+' '+libdir+' '+logdir\n (output, error, retz) = runShellCommand(comm)\n\n print \"done.\"" ]
[ "0.7720015", "0.6934762", "0.68778396", "0.6763646", "0.66906065", "0.6680769", "0.668029", "0.66643465", "0.66492623", "0.6645479", "0.6635241", "0.6626942", "0.6605654", "0.65854466", "0.654416", "0.6536765", "0.6526797", "0.64898103", "0.64494216", "0.6443703", "0.6431596", "0.64281493", "0.6421824", "0.6420025", "0.64162767", "0.6400494", "0.6388606", "0.6372155", "0.6369315", "0.63596666", "0.63587725", "0.6351041", "0.6349154", "0.63455576", "0.6341319", "0.6336749", "0.63364047", "0.63309884", "0.63142085", "0.63126796", "0.63040906", "0.62925726", "0.6287985", "0.6283181", "0.62622315", "0.6255875", "0.62524146", "0.6244203", "0.6240606", "0.6233933", "0.62319875", "0.62312895", "0.62232745", "0.62017304", "0.6197875", "0.61842704", "0.6179306", "0.61736083", "0.61705077", "0.61704785", "0.61690754", "0.61639625", "0.6163873", "0.6163449", "0.6159685", "0.61484927", "0.61365265", "0.6125922", "0.6111614", "0.6108433", "0.6107462", "0.61020565", "0.6096402", "0.60726833", "0.60707897", "0.60657144", "0.60604143", "0.60491663", "0.6045021", "0.6043312", "0.6034387", "0.6021114", "0.60198224", "0.60039896", "0.6000147", "0.5998619", "0.5995463", "0.59888834", "0.5985517", "0.5984871", "0.59810084", "0.59810084", "0.59805375", "0.5977186", "0.5975519", "0.5974188", "0.5971434", "0.59689987", "0.596187", "0.59586084" ]
0.72082686
1
Purge thumbnail dir from irrelevant thumbnails
def purge_thumbnails(args, thumbdir, posts, diary=False):
    thumblist = list_of_thumbnails(posts, diary)
    thumbs_to_remove = list()
    for fullname in glob.glob(os.path.join(thumbdir, '*.jpg')):
        if os.path.basename(fullname) not in thumblist:
            thumbs_to_remove.append(fullname)

    if len(thumbs_to_remove) > args.thumbnails.threshold_thumbs:
        inpt = 'x'
        while inpt not in 'yn':
            inpt = input(f'{len(thumbs_to_remove)} thumbnails to remove. Continue [y|n]? ').lower()
        if inpt == 'n':
            return

    for name in thumbs_to_remove:
        print('Removing thumbnail', name)
        os.remove(name)
        info_fullname = os.path.splitext(name)[0] + '.info'
        if os.path.exists(info_fullname):
            os.remove(info_fullname)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear_thumbnails(self):", "def delete_thumbnail(self, thumbnail_name):", "def tearDown(self):\n for fn in self.tempImages:\n os.remove(os.path.join(self.root, fn))\n os.rmdir(self.root)", "def clear_tmp_folder(self):\r\n for file in os.listdir(self.temp_dir):\r\n if file.endswith('.png') or file.endswith('.jpg'):\r\n path = os.path.join(self.temp_dir, file)\r\n print ('Cleaned up {}'.format(path))\r\n os.remove(path)", "def purge():\n all_hashes = read_all()\n used_hashes = read_used()\n\n for kind, hashes in used_hashes.items():\n to_remove = all_hashes[kind].difference(hashes)\n if kind == 'evs':\n delete_from_directory_by_hashes(EV_DIRECTORY, to_remove)\n elif kind == 'cache':\n delete_from_directory_by_hashes(CACHE_DIRECTORY, to_remove)\n elif kind == 'seeds':\n delete_from_directory_by_hashes(SEED_DIRECTORY, to_remove)\n\n reset_used()", "def clear_thumbnail(self):\n from anima.ui import utils\n utils.clear_thumbnail(self.thumbnail_graphics_view)", "def cleanup():\n cmd='docker rmi --force $(docker images -a -q)'\n bash_command(\"Deleting all images\", cmd)", "def clearImageFolder():\n filelist = listImageFolder()\n for f in filelist:\n os.remove('{}/{}'.format(imageFolder, f))", "def cleanup(self):\r\n for f in [i for d in self.data.values() for i in d[\"filenames\"]]:\r\n try:\r\n os.unlink(f)\r\n except Exception: pass\r\n self.Destroy()", "def _cleanup(self):\n os.system(\"rm -r %s/*\" %(self._snippet_index_dir))\n os.system(\"rm %s/*\" %(self._para_dir))\n os.system(\"rm %s/*\" %(self._temp_dir))\n os.system(\"rm %s/*\" %(self._snippet_result_dir))", "def cleanup(self):\n self.qemu.clean_run_files()\n for tmp in glob.glob(self.configfile + \"?*\"):\n os.unlink(tmp)", "def space_cleaning():\n for file in os.listdir(\".\"):\n if file.endswith(\".png\"):\n os.remove(file)", "def photo_file_cleanup(sender, **kwargs):\n instance = kwargs.get('instance')\n filename = instance.path.url[1:]\n if os.path.exists(filename):\n os.remove(filename)", "def clean_up_temp_dir():\n files = glob.glob(f'{CONFIG_DIR}/tmp/*')\n for f in files:\n try:\n os.remove(f)\n except Exception:\n pass", "def clearAllPictures(self):\n shutil.rmtree(PNG_OUTPUT_PATH)\n os.makedirs(PNG_OUTPUT_PATH)", "def cleanup():\n download_dir = settings.DOWNLOAD_BASE_DIR\n\n for base, dirs, files in os.walk(download_dir):\n for dir in dirs:\n shutil.rmtree(download_dir + dir)", "def delete_file(self, instance, sender, **kwargs):\n super(AutoImageField, self).delete_file(instance, sender)\n if getattr(instance, self.attname):\n # Get full path and the base directory that contains the file\n file_name = getattr(instance,self.name).name\n basedir = os.path.dirname(file_name)\n \n # Look for thumbnails created from filters for the current filename\n # and delete the files\n mask = add_to_basename(file_name, '_*')\n [os.remove(os.path.join(basedir, f)) for f in glob.glob(mask)]", "def auto_delete_image_and_thumbnail_on_delete(sender, instance, **kwargs):\n if instance.image:\n if os.path.isfile(instance.image.path):\n os.remove(instance.image.path)\n\n if instance.thumbnail:\n if os.path.isfile(instance.thumbnail.path):\n os.remove(instance.thumbnail.path)\n\n return False", "def delete_leftovers(self):\n for each_file, artist in self.past_songs_db_data:\n if os.path.isfile(each_file): \n os.remove(each_file)\n print \"Deleted \" + each_file\n\n for each_file in os.listdir(\".\"):\n if each_file.endswith(\".jpg\"):\n os.remove(each_file)", "def cleanup_old_backups(self):\n print(\"Cleaning Old Backups for media files\")\n\n 
file_list = utils.get_backup_file_list(\n self.get_databasename(),\n self.get_servername(),\n 'media.tar.gz',\n self.storage\n )\n\n for backup_date, filename in file_list[0:-dbbackup_settings.CLEANUP_KEEP_MEDIA]:\n if int(backup_date.strftime(\"%d\")) != 1:\n print(\" Deleting: %s\" % filename)\n self.storage.delete_file(filename)", "def cleanup_files(self):\n\n self.backup_files()\n self.delete_files()", "def clean_temp_storage_dir(self, filenames):\n for fn in filenames:\n try:\n pathlib.Path(pathlib.PurePath(self.temp_storage_dir, fn)).unlink()\n except FileNotFoundError:\n pass", "def tearDown(self):\n shutil.rmtree(self.test_pic_folder)", "def clean(self):\n if self.image:\n self.glance.images.delete(self.image['id'])\n\n if self.image_file:\n shutil.rmtree(self.download_path)", "def cleanup(self):\n if os.path.exists(self.tgzfile):\n os.remove(self.tgzfile)\n\n if os.path.exists(self.dirname):\n shutil.rmtree(self.dirname)", "def remove_unactionable_images(data):\n os.makedirs(os.path.join(data, 'removed'), exist_ok=True)\n for product in os.listdir(data):\n if product.startswith('product') is False:\n continue\n path = os.path.join(data, product)\n if os.path.isdir(path) is False:\n continue\n if is_useful(path, 0.5) is False:\n print('\\tRemoving ' + path)\n shutil.copy(os.path.join(path, 'TCI.tiff'),\n os.path.join(data, 'removed', product + '.tiff'))\n shutil.rmtree(path)\n else:\n shutil.copy(os.path.join(path, 'TCI.tiff'),\n os.path.join(data, product + '.tiff'))", "def cleanup_intermediate_files():\n\n dirs = (DIR_PAGE, DIR_SRGB, DIR_VTI, DIR_TIFF, DIR_BACK, DIR_TEXT)\n map(lambda dir: shutil.rmtree(os.path.join(cwd, dir)) , dirs)", "def clean(allimages, alldirs):\n\n for img in allimages:\n # Delete HTML files\n htmlfn = join(opts.root, img._dir._path, img._pagefn)\n if exists(htmlfn):\n if opts.verbose:\n print \"Deleting\", htmlfn\n try:\n os.unlink(htmlfn)\n except:\n print >> sys.stderr, \"Error: deleting\", htmlfn\n\n # Delete thumbnails\n if img._thumbfn:\n thumbfn = join(opts.root, img._thumbfn)\n if exists(thumbfn):\n if opts.verbose:\n print \"Deleting\", thumbfn\n try:\n os.unlink(thumbfn)\n img._thumbfn = None\n except:\n print >> sys.stderr, \"Error: deleting\", thumbfn\n\n for d in alldirs:\n files = dircache.listdir(join(opts.root, d._path))\n\n # Delete HTML files in directories\n for f in files:\n fn = join(opts.root, d._path, f)\n if f in [ dirindex_fn, allindex_fn, allcidx_fn,\n sortindex_fn, css_fn ] or \\\n f.startswith('trackindex-'):\n if opts.verbose:\n print \"Deleting\", fn\n try:\n os.unlink(fn)\n pass\n except:\n print >> sys.stderr, \"Error: deleting\", fn\n\n if f == index_fn and islink(fn):\n os.unlink(fn)", "def clean():\n folders = ['utils_dfn/temp', 'utils_dfn/img', 'utils_dfn/mask', 'utils_dfn/output']\n for folder in folders:\n for item in os.listdir(folder):\n item_path = os.path.join(folder, item)\n if os.path.isdir(item_path):\n shutil.rmtree(item_path)\n elif os.path.isfile(item_path):\n os.remove(item_path)", "def clean(self):\n\n for metric in self.metricList:\n listf = glob.glob(\n '{}/*_{}_{}*'.format(self.outDir, metric.name, self.num))\n if len(listf) > 0:\n for val in listf:\n os.system('rm {}'.format(val))", "def purge(dir, pattern):\n for p in Path(dir).glob(pattern):\n logging.debug(f\"Deleting - {p}\")\n p.unlink()", "def clean_webp_textures():\n for webp in PNG_TEXTURES['output_files']:\n if os.path.isfile(webp):\n os.remove(webp)", "def clean(self):\n if self.options.format != 'svg':\n for svgfile in 
self.svgouts.itervalues():\n os.remove(svgfile)\n os.rmdir(self.tmpdir)", "def purge_htmlfiles(args, posts):\n htmlist = list_of_htmlfiles(args, posts)\n html_to_remove = list()\n for fullname in glob.glob(os.path.join(args.root, '*.htm*')):\n if fullname not in htmlist:\n html_to_remove.append(fullname)\n\n if len(html_to_remove) > args.thumbnails.threshold_htmlfiles:\n inpt = 'x'\n while inpt not in 'yn':\n inpt = input(f'{len(html_to_remove)} html files to remove. Continue [y|n]? ').lower()\n if inpt == 'n':\n return\n\n for name in html_to_remove:\n print('Removing html files', name)\n os.remove(name)", "def cleanup_file(name: str):\n if os.path.exists(name) and os.path.isfile(name): # h5\n os.remove(name)\n elif os.path.exists(name) and os.path.isdir(name): # tf\n shutil.rmtree(name)", "def _cleanup_files(self):\n\n for root, dirs, files in os.walk(self.build_directory):\n dirs_to_delete = [\n Path(root).joinpath(x) for x in dirs if x == '__pycache__'\n ]\n files_to_delete = [\n Path(root).joinpath(x) for x in files if Path(x).suffix == '.pyc'\n ]\n for d in dirs_to_delete:\n logger.info('Deleting: %s', d)\n shutil.rmtree(d)\n for f in files_to_delete:\n logger.info('Deleting: %s', f)\n f.unlink()", "def tearDown(self):\n for root, dirs, files in os.walk(TEMPDIR, topdown=False):\n for name in files:\n os.remove(os.path.join(root, name))\n for name in dirs:\n os.rmdir(os.path.join(root, name))\n os.rmdir(root)", "def clean():\n clean_files()", "def delete_bad_images(bad_image_urls):\n for url in bad_image_urls:\n try:\n os.remove(cfg.PHOTO_DIR + web.get_file_name(url))\n except Exception as e:\n print(\"Error while removing: {0}, E: {1}\".format(url, e))\n\n print(\"# removed image(s): {0}\".format(len(bad_image_urls)))", "def delete_AllImgs(self):\n self.listImages.remove_all_imgs()", "def clear_figures() -> None:\n \n for filename in os.listdir(FIGURE_DIR):\n filepath = os.path.join(FIGURE_DIR, filename)\n try:\n shutil.rmtree(filepath)\n except OSError:\n os.remove(filepath)", "def clearImageCache(self):\n if os.path.exists(\"./cache/\"):\n shutil.rmtree(\"./cache/\")", "def delete_test_image(image_field):\n warnings.warn(DeprecationWarning(\n \"delete_test_image() is deprecated in favour of the \"\n \"get_sample_image() context manager.\"), stacklevel=2)\n # ensure all thumbs are deleted\n for filename in glob.glob(\n os.path.join(\n settings.MEDIA_ROOT, 'thumbs', image_field.name.split('/')[-1]\n ) + '*'\n ):\n os.unlink(filename)\n # delete the saved file\n image_field.delete()", "def __del__(self):\n\t\tif self.temp_dir:\n\t\t\tself.temp_dir.cleanup()", "def _clean_workdir(self):\n\t\ttoremove = [self._get_config_filepath(), self._get_params_filepath(), self._get_conv_filepath(), self._get_psf_filepath()]\n\t\tfor filepath in toremove:\n\t\t\tif os.path.exists(filepath):\t\n\t\t\t\tlogger.debug(\"Removing existing file %s...\" % (filepath))\n\t\t\t\tos.remove(filepath)", "def purge_files(self):\n run_keyword(\"Purge Server Configuration\")\n run_keyword(\"Purge Cache Manager Configuration\")\n # TODO: Probably the only sane way to do this is to call\n # a helper script which runs as root.\n # run_keyword(\"Purge Cache\")\n valid = r'/vicep([a-z]|[a-h][a-z]|i[a-v])$'\n for vicep in glob.glob(\"/vicep*\"):\n if re.match(valid, vicep) and os.path.isdir(vicep):\n run_keyword(\"Purge Directory\", \"%s/AFSIDat\" % vicep)\n run_keyword(\"Purge Directory\", \"%s/Lock\" % vicep)\n for vheader in glob.glob(\"%s/V*.vol\" % vicep):\n run_keyword(\"Sudo\", \"rm -f %s\" % vheader)", "def 
clean_chunk_files(dirpath):\n workdir = os.getcwd()\n os.chdir(dirpath)\n for filename in glob.glob(\"[0-9]*_[0-9]*_[0-9]*.hdf5\"):\n os.remove(filename)\n os.chdir(workdir)", "def main(directory=\"/images\"):\n dir_path = os.path.dirname(os.path.realpath(__file__))\n parent_dir = os.path.dirname(dir_path)\n threshold = 5000\n start = time.time()\n num_images = 0\n for fn in os.listdir(parent_dir + directory):\n num_images += 1\n full_path = parent_dir + directory + \"/\" + fn\n if os.stat(full_path).st_size < threshold:\n os.remove(full_path)\n print(\"deleted file\", fn)\n print(\"Total files\", num_images)\n curr_time = time.time()\n print(\"Time run so far\", round(curr_time - start, \"\\n\"))\n end = time.time()\n print(\"Time to delete files:\", round(end - start), \"seconds\")\n print(\"Number of images\", num_images)", "def teardown():\n os.remove('green-dot.tif')\n os.remove('green-dot.jpg')\n os.remove('green-dot.png')", "def _clean_up_temporary_files(dataset_dir):\n filename = _DATA_URL.split('/')[-1]\n filepath = os.path.join(dataset_dir, filename)\n tf.gfile.Remove(filepath)\n\n tmp_dir = os.path.join(dataset_dir, 'cifar-100-python')\n tf.gfile.DeleteRecursively(tmp_dir)", "def cleanup(self):\n\t\tfor filename in self.cfg_files:\n\t\t\tif os.path.isfile(filename):\n\t\t\t\tsize = os.stat(filename)[6]\n\t\t\t\tif size == 0:\n\t\t\t\t\tos.remove(filename)\n\n\t\treturn True", "def clean_old_data():\n logger.info('Cleaning standalone files on disk...')\n for absolute_path in glob.glob(MEDIA_URL + '*'):\n file_name = os.path.basename(absolute_path)\n try:\n relative_path = os.path.join(AUDIOS_URL, file_name)\n audio = Audio.objects.get(filename=relative_path)\n if audio.get_type() == 'episode':\n try:\n # If there are inactive audios on its being\n for e in audio.podcast.episode_set.exclude(pk=audio.podcast.active_episode.pk):\n if not e.is_active():\n logger.info('Inactive audio found in podcast set. Erasing files.')\n e.delete_files()\n except Exception, e:\n logger.exception(e.message)\n except ObjectDoesNotExist, e:\n logger.info('A file with no audio registered in database')\n if os.path.isfile(relative_path):\n logger.info('Erasing: %s' % relative_path)\n os.remove(relative_path)\n logger.info('... 
Done.')", "def _clean_bins():\n rmtree(LIBS_DIR)\n rmtree(BINS_DIR)\n rmtree(HEADERS_DIR)", "def clear_outdated_files():\n for f in os.listdir(MEDIA_ROOT):\n file_path = os.path.join(MEDIA_ROOT, f)\n if os.path.isfile(file_path) and os.stat(file_path).st_mtime < time.time() - STORE_PDF_DAYS * 86400:\n os.remove(file_path)", "def purge_downloaded_files():\n for fpath in DOWNLOADED_FILEPATHS:\n if os.path.exists(fpath):\n os.remove(fpath)", "def test_cleanup(self):\n imgurl = \"{}spei03.nc\".format(self.processor.base_url)\n httpretty.register_uri(httpretty.GET, imgurl,\n body=get_mock_image())\n self.processor.download(imgurl, 'spei03.tif')\n self.assertNotEqual([], glob.glob(os.path.join(\n self.processor.tmp_dir, self.processor.prefix + '*')))\n self.processor.cleanup()\n self.assertEquals([], glob.glob(os.path.join(\n self.processor.tmp_dir, self.processor.prefix + '*')))", "def cleanup(self):\n if self.path and os.path.exists(self.path) and not self.dont_remove:\n rmtree(self.path)\n del self.path\n if self in self.instances:\n self.instances.remove(self)", "def clear_obsolete_trash():\n minute_ago = datetime.now() - timedelta(minutes=1)\n Picture.trash.filter(trashed_time__lt=minute_ago).delete()", "def cleanup(e):\n for f in e.files:\n try:\n if os.path.isfile(f):\n os.remove(f)\n except OSError:\n continue\n\n return", "def teardown():\n for filename in files_to_delete:\n delete_file(filename)", "def tearDown(self):\n for d in os.listdir(tmp_dir_path):\n d_path = os.path.join(tmp_dir_path,d)\n try:\n os.remove(d_path)\n except:\n for f in os.listdir(d_path):\n f_path = os.path.join(d_path,f)\n os.remove(f_path)\n os.rmdir(d_path)\n assert os.listdir(tmp_dir_path) == []", "def test_thumbnail(self):\n pub = PublicationFactory(thumbnail__filename=\"tester.jpg\")\n self.assertEqual(\n pub.thumbnail.url, f\"/media/reading/publications/{pub.slug}/tester.jpg\"\n )\n self.assertTrue(\n pub.thumbnail.path.endswith, f\"/reading/publications/{pub.slug}/tester.jpg\"\n )\n\n # Tidy up:\n pub.thumbnail.delete()", "def cleanup(destination_subdir):\n sp.check_call(f\"rm {destination_subdir}/*.bam\", shell=True)\n sp.check_call(f\"rm {destination_subdir}/*.sam\", shell=True)\n sp.check_call(f\"rm -rf ./index_files\", shell=True)", "def clean(self):\n print(\"Cleaning outputs in %s\" % self.args.output)\n files = glob.glob(self.args.output + \"*.pkl\")\n for f in files:\n if os.path.exists(f):\n os.remove(f)", "def purge(self):\n pass", "def tearDown(self):\n if os.path.exists(settings.MEDIA_ROOT):\n shutil.rmtree(settings.MEDIA_ROOT)", "def tearDown(self):\n if os.path.exists(settings.MEDIA_ROOT):\n shutil.rmtree(settings.MEDIA_ROOT)", "def cleanup(self):\n files = self.nlst()\n latest = self.latest_filename\n for filename in files:\n if filename != latest:\n result = self.delete(filename)\n logger.info(f\"Deleted old export from FTP: {result}\")", "def del_imgs(path):\n files = os.listdir(path)\n for file in files:\n if file.endswith(\".jpg\"):\n os.remove(os.path.join(path, file))", "def clean():\n for f in [f for f in os.listdir() if f.endswith(\".part\")]:\n os.remove(f)", "def cleanup_intermediate_files(self):\n self.cmd(\"rm -f {local_temp_dir}/*rg_dict* \\\n {local_temp_dir}/*aln* \\\n {local_temp_dir}/snappy*\".\n format(\n local_temp_dir=self.local_temp_dir\n ),\n shell=True)", "def clean():\n clean_flatbuffer_binaries()\n clean_webp_textures()", "def clean_up_artifacts():\n logger.info(\"Removing artifacts from Sandbox\")\n for item in Analyzer.uploaded:\n # Perform the delete\n response = 
Samples.delete_sample(ids=item)\n if response[\"status_code\"] > 201:\n # File was not removed, log the failure\n logger.warning(\"Failed to delete %s\", item)\n else:\n logger.debug(\"Deleted %s\", item)\n logger.info(\"Artifact cleanup complete\")", "def tearDown(self) -> None:\n filtered = [f for f in glob.glob('steps/tests/test_output/*') if not re.match(r'\\.keep', f)]\n for file in filtered:\n try:\n if Path(file).is_dir():\n shutil.rmtree(file)\n else:\n os.remove(file)\n except PermissionError as pe:\n # We don't necessarily care that much\n continue", "def tearDownClass(self):\n if (os.path.exists(MEDIA_ROOT+\"/gitload_test\")):\n shutil.rmtree(MEDIA_ROOT+\"/gitload_test\")", "def cleanup(self):\r\n if self.tempDirectory != None:\r\n shutil.rmtree(self.tempDirectory, True)\r\n self.tempDirectory = None", "def purge_cache():\n for (dir_path, dir_names, file_names) in os.walk(CACHE, topdown=False):\n for file_name in file_names:\n if is_json_file(file_name):\n path = os.path.join(dir_path, file_name)\n print(\"Removing file “%s”\" % path)\n os.remove(path)\n for directory in dir_names:\n path = os.path.join(dir_path, directory)\n if not os.listdir(path):\n print(\"Removing directory “%s”\" % path)\n os.rmdir(path)", "def cleanup():\n if len(env.releases) > 3:\n directories = env.releases\n directories.reverse()\n del directories[:3]\n env.directories = ' '.join([ '%(releases_path)s/%(release)s' % { 'releases_path':env.releases_path, 'release':release } for release in directories ])\n run('rm -rf %(directories)s' % env)", "def sorl_delete(**kwargs):\n from sorl.thumbnail import delete\n delete(kwargs['file'])", "def purge() -> None:\r\n _purge_func(False)", "def __clean(path, pattern = '.tiff'):\n for f in os.listdir(path):\n if re.search(pattern, f):\n os.remove(os.path.join(path, f))\n\n print(\"directory cleaned\")", "def remove_thumbnail(inJSON):\n time.sleep(2)\n consoleOutput = exec_console_command(\"rm \" + inJSON + \";\" + constants.getExitStatus)\n\n if \"\\n1\" in consoleOutput:\n raise IOError(\"Thumbnail file doesn't exist to delete. 
No worries though, it was going to be deleted anyway!\")\n\n return 0", "def tearDown(self):\n account_models.User.objects.all().delete()\n photo_models.PhotoFeed.objects.all().delete()\n photo_models.PhotoClassification.objects.filter(name__in=[\"Rural\", \"Abstract\", \"City\"]).delete()\n test_helpers.clear_directory('backend/media/', '*.jpg')", "def cleanup_backups():\n try:\n yield\n finally:\n shutil.rmtree(\"tmp/backups\")", "def tearDown(self):\n for f in os.listdir('/tmp'):\n if not f.startswith(self.FILE_PREFIX):\n continue\n\n os.remove(os.path.join('/tmp', f))", "def cleanup(self):\n\n # check if the directory exists\n if not os.path.exists(self.path):\n return\n\n # check if the directory is a directory\n if not os.path.isdir(self.path):\n return\n\n # loop over content of directory and remove it\n for the_file in os.listdir(self.path):\n file_path = os.path.join(self.path, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n pass", "def purge_outdated(self):\n todelete = []\n sql = \"select rowid, path, mtime from pictures\"\n cur = self.con.execute(sql)\n for rowid, path_str, mtime in cur:\n if mtime and op.exists(path_str):\n picture_mtime = os.stat(path_str).st_mtime\n if int(picture_mtime) <= mtime:\n # not outdated\n continue\n todelete.append(rowid)\n if todelete:\n sql = \"delete from pictures where rowid in (%s)\" % ','.join(map(str, todelete))\n self.con.execute(sql)", "def cleanUpTemporaryFiles(options):\n os.system(\"rm \"+options.output_directory_per_run+\"/*.abundance\")\n os.system(\"rm \"+options.output_directory_per_run+\"/*.phasing_score\")\n os.system(\"rm \"+options.output_directory_per_run+\"/*regionsOfInterest*\")\n os.system(\"mv \"+options.output_directory_per_run+\"/* \"+options.output_directory_per_run+\"/../\")\n os.system(\"rm -rf \"+options.output_directory_per_run)", "def remover_files():\n directory = os.getcwd()\n for file_name in glob.glob((\"{}/tmp/*\").format(directory)):\n remove(file_name)", "def _final_cleanup(self):\n # Clean up and remove the temporary gisdbase\n self._cleanup()\n # Remove resource directories\n if \"error\" in self.run_state or \"terminated\" in self.run_state:\n self.storage_interface.remove_resources()", "def _clean_up_temporary_files(dataset_dir):\n return", "def cleanup(tag, keepimg, builder):\n manager = Manager('cleanup', tag, keepimg=keepimg,\n builder_hostname=builder)\n manager.run()", "def clean(self, **kwargs):\n if os.path.exists(self.old_artifact_path):\n shutil.rmtree(self.old_artifact_path)\n log.info(\"Removing old artifact path: %s\" % self.old_artifact_path)", "def __del__(self):\n for filename in self.files:\n unlink(filename)", "def photo_edit_file_cleanup(sender, **kwargs):\n instance = kwargs.get('instance')\n filename = instance.upload.url[1:]\n if os.path.exists(filename):\n os.remove(filename)", "def _purge():\r\n _cache.clear()", "def cleanup(folder):\n os.system('rm -rf %s/*' % folder)", "def cleanup(self):\n\n if self.debug:\n print 'Running cleanup()'\n print 'Starting removing dead links'\n\n for root, dirs, files in os.walk(self.tags_folder):\n if files:\n for f in files:\n try:\n full_path = os.path.join(root, f)\n if not os.path.exists(os.readlink(full_path)):\n os.unlink(full_path)\n if self.debug:\n print 'Removing dead link %s' % full_path\n except OSError:\n pass\n\n if self.debug:\n print 'Starting removing empty directories'\n self._del_empty_dirs(self.tags_folder)", "def 
cleanup(tempdir):\n try:\n shutil.rmtree(tempdir)\n except OSError:\n pass", "def _cleanup_uploads(self):\n logger.debug(\"Performing blob upload cleanup\")\n\n while True:\n # Find all blob uploads older than the threshold (typically a week) and delete them.\n with UseThenDisconnect(app.config):\n stale_upload = model.get_stale_blob_upload(DELETION_DATE_THRESHOLD)\n if stale_upload is None:\n logger.debug(\"No additional stale blob uploads found\")\n return\n\n # Remove the stale upload from storage.\n logger.debug(\"Removing stale blob upload %s\", stale_upload.uuid)\n assert stale_upload.created <= (datetime.utcnow() - DELETION_DATE_THRESHOLD)\n\n try:\n storage.cancel_chunked_upload(\n [stale_upload.location_name], stale_upload.uuid, stale_upload.storage_metadata\n )\n except Exception as ex:\n logger.debug(\n \"Got error when trying to cancel chunked upload %s: %s\",\n stale_upload.uuid,\n ex.message,\n )\n\n # Delete the stale upload's row.\n with UseThenDisconnect(app.config):\n model.delete_blob_upload(stale_upload)\n\n logger.debug(\"Removed stale blob upload %s\", stale_upload.uuid)" ]
[ "0.7496226", "0.70975465", "0.6904142", "0.6789052", "0.65433615", "0.64652795", "0.64232606", "0.6410066", "0.635366", "0.6340497", "0.63255644", "0.63222384", "0.6308864", "0.630171", "0.6284883", "0.6272671", "0.6269268", "0.62621397", "0.62551665", "0.6248571", "0.6245241", "0.6237175", "0.6237087", "0.6200791", "0.61761653", "0.6172708", "0.6151164", "0.6135021", "0.61335015", "0.6127233", "0.6121967", "0.60975504", "0.60882884", "0.6064728", "0.606229", "0.60616326", "0.6060441", "0.6056319", "0.6048273", "0.6043328", "0.6034349", "0.6028236", "0.601911", "0.6018856", "0.60187143", "0.601212", "0.60049516", "0.60032994", "0.59956187", "0.5991564", "0.5988041", "0.5981097", "0.5970333", "0.59673053", "0.5967051", "0.5957385", "0.5953738", "0.5945983", "0.59356385", "0.5930897", "0.5929818", "0.59236217", "0.59211504", "0.59205663", "0.59203655", "0.59127027", "0.59127027", "0.59041286", "0.5901309", "0.59009284", "0.59000945", "0.58958435", "0.58943784", "0.5886687", "0.58847445", "0.58745676", "0.58707833", "0.5869204", "0.58661675", "0.58649737", "0.58645016", "0.58621603", "0.5858841", "0.58550274", "0.5852865", "0.58427393", "0.5838059", "0.5835754", "0.5816422", "0.5816223", "0.58068424", "0.58056927", "0.58030945", "0.57962143", "0.5787214", "0.57789904", "0.5761854", "0.5757138", "0.5754514", "0.57515055" ]
0.7741738
0
Return the list of full paths for files in source directory
def list_of_files(sourcedir, recursive): result = list() if recursive is False: listdir = sorted_listdir(os.listdir(sourcedir)) if '.nomedia' not in listdir: for basename in listdir: result.append(os.path.join(sourcedir, basename)) else: for root, dirs, files in os.walk(sourcedir): if '.nomedia' not in files: for basename in sorted_listdir(files): result.append(os.path.join(root, basename)) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_source_files(self):\n return [\n path.as_posix()\n for path in _Path(self.src_dir).rglob(\"*\")\n if not path.is_dir()\n ] + [\n (path / \"CMakeLists.txt\").as_posix()\n for path in _PurePath(self.src_dir).parents\n ]", "def collect_project_source_files():\n source_files = glob.glob(PROJECT_SOURCE_FILES_FOLDER + '/**/*.py', recursive=True)\n # Insert root main.py at the beginning.\n source_files.insert(0, os.path.join(PROJECT_ROOT_FOLDER, 'main.py'))\n return list(map(lambda path: posixpath.join(*path.split('\\\\')), source_files))", "def relative_to_buildroot(self):\n return [os.path.join(self.rel_path, source) for source in self.source_paths]", "def sources_absolute_paths(self):\r\n abs_target_base = os.path.join(get_buildroot(), self.target_base)\r\n for src in self.sources:\r\n yield os.path.join(abs_target_base, src)", "def sources_relative_to_buildroot(self):\r\n for src in self.sources:\r\n yield os.path.join(self.target_base, src)", "def get_source_files(dir_name):\n return get_files(dir_name, \".h\") + get_files(dir_name, \".cpp\")", "def get_source_file_list(dir):\n extensions = ['h', 'c', 'cc', 'cpp', 'm', 'mm']\n extensions = tuple(['.' + x for x in extensions])\n\n files = set()\n for (dirpath, dirnames, filenames) in os.walk(dir):\n files.update([os.path.join(dirpath, name) for name in filenames if\n name.endswith(extensions)])\n\n return [os.path.abspath(file) for file in files]", "def source_dirs_files(fspath, fil=None):\r\n dirs = []\r\n files = []\r\n for child in fspath.listdir(fil=fil):\r\n if child.basename.startswith('.'):\r\n continue\r\n if child.check(dir=True):\r\n dirs.append(child)\r\n elif child.check(file=True):\r\n if child.ext in ['.pyc', '.pyo']:\r\n continue\r\n files.append(child)\r\n return sorted(dirs), sorted(files)", "def _get_files(self):\n # pylint: disable=unused-variable\n for dirpath, __, filenames in os.walk(self.start_location):\n for file_ in filenames:\n if file_.endswith('.py'):\n yield \"{0}{1}\".format(dirpath, file_)", "def get_path_names(directory):\n paths_without_source = set()\n paths = glob.glob(source + \"**/*.*\", recursive=True)\n for p in paths:\n paths_without_source.add(p.replace(directory, \"\", 1))\n\n return paths_without_source", "def getSourcePaths(self, makeGlyphs=True, makeKerning=True, makeInfo=True):\n paths = []\n for name in self.sources.keys():\n paths.append(self.sources[name][0].path)\n return paths", "def get_paths(pattern):\n if not in_source_tree:\n pattern = '../' + pattern\n\n files = glob.glob(os.path.normpath(os.path.join(top_dir, pattern)))\n return files", "def get_library_content(self):\n from glob import glob\n try:\n os.path.isdir(self.source)\n lst = glob(self.source + '/*')\n except TypeError:\n lst = self.source\n dircheck = True\n while dircheck is True:\n dircheck = False\n newlst = []\n for entry in lst:\n if os.path.isdir(entry):\n newlst.extend(glob(entry + '/*'))\n dircheck = True\n else:\n newlst.append(entry)\n lst = newlst\n return lst", "def source_paths(self):\n paths = self.config.get('static_dirs')\n if paths:\n return paths\n return [self.config.get('static_dir')]", "def FindCheckerFiles(path):\n if not path:\n Logger.fail(\"No source path provided\")\n elif os.path.isfile(path):\n return [ path ]\n elif os.path.isdir(path):\n foundFiles = []\n for root, dirs, files in os.walk(path):\n for file in files:\n extension = os.path.splitext(file)[1]\n if extension in [\".java\", \".smali\"]:\n foundFiles.append(os.path.join(root, file))\n return foundFiles\n else:\n 
Logger.fail(\"Source path \\\"\" + path + \"\\\" not found\")", "def getFiles(searchDir = './', extension = 'source'):\n from glob import glob \n\n return glob(searchDir+'/*.'+extension)", "def get_paths(file_path):\n return glob(path.join(file_path, '*'))", "def get_file_list(input_dir):\n\tfile_paths = [input_dir +'/' + f for f in listdir(input_dir) if isfile(join(input_dir, f)) ]\n\treturn file_paths", "def expand_source_files(filenames, cwd=None):\n out = []\n for f in expand_globpaths(filenames.split(), cwd):\n if path_utils.isdir(f):\n # If we have a directory, collect all the .py files within it....\n out += recursive_glob(path_utils.join(f, \"**\", \"*.py\"))\n elif f.endswith(\".py\"):\n out.append(f)\n elif is_file_script(f, cwd):\n # .....and only process scripts when specfiied by the user.\n out.append(f)\n\n return set(out)", "def get_source_files(self):\n files = []\n for pkg in self._po_packages():\n files.append(pkg['template'])\n files.extend((item['po_file'] for item in\n self._po_package_contents(pkg)))\n return files", "def get_all_sources(remit):\n if remit == 'panzer' or remit == 'pandoc':\n os.chdir('source-'+remit)\n sourcelist = [name for name in os.listdir(\".\") if os.path.isdir(name)]\n os.chdir('..')\n else:\n # get the maximal list of sources for a diff\n pandoc_list = get_all_sources('pandoc')\n panzer_list = get_all_sources('panzer')\n sourcelist = list(set(pandoc_list+panzer_list))\n sourcelist.sort()\n return sourcelist", "def full_path(startPath,files):\n\n files = list_strings(files)\n base = os.path.split(startPath)[0]\n return [ os.path.join(base,f) for f in files ]", "def Get_Source(self):\r\n source_directory = filedialog.askdirectory(initialdir = initial_dir, title=\"Select source folder\")\r\n global source\r\n source = source_directory \r\n self.txt_srcPath.delete(0, 'end')\r\n self.txt_srcPath.insert(0, str(source_directory))\r\n file_type = self.txt_fileType.get()\r\n hrs= int(self.txt_fileAge.get())\r\n file_age = datetime.timedelta(hours=hrs)\r\n global filenames_list\r\n filenames_list = []\r\n global files_list\r\n files_list = []\r\n global ft_list\r\n ft_list = []\r\n for f in glob.iglob(os.path.join(source, file_type)):\r\n files_list = [os.path.splitext(f)[0]]\r\n filenames_list.append(f) \r\n return(filenames_list, source, files_list)", "def get_all_files(cwd):\n return os.listdir(cwd)", "def walkSource(sourcedir):\n for parent, dnames, fnames in os.walk(sourcedir):\n for fname in fnames:\n if fname not in SKIP_FILES:\n filename = os.path.join(parent, fname)\n if filename.endswith('.java') and os.path.isfile(filename):\n with open(filename, 'r') as f:\n lines = f.readlines()\n yield (lines, fname)", "def get_source_files(self):\n return zip(*self.distribution.scripts)[0]", "def find_dcds(src):\n\n dcd_paths = []\n\n for root, dirs, files in os.walk(src):\n for filename in files:\n if filename.endswith(\".dcd\"):\n dcd_paths.append(os.path.join(root, filename))\n\n return dcd_paths", "def find_src_files(kem_dir):\n for dirpath, _, filenames in os.walk(kem_dir):\n for fn in filenames:\n if fn.endswith('.c'):\n if dirpath != kem_dir:\n ndir = dirpath[len(kem_dir) + 1:]\n yield os.path.join(ndir, fn)\n else:\n yield fn", "def search(self, src, exclude_pattern = [\"**/*.pyc\"], include_pattern = [\"**/*.py\"]):\n src = os.path.abspath(src)\n \n _target = Path(src)\n _target._flavour.casefold = lambda x : x # basic windows path don't distinguish upper / lower case.\n allfiles = list(_target.glob(\"**/*\"))\n \n exclude = list()\n for _ex in 
exclude_pattern:\n exclude += _target.glob(_ex) \n \n include = list()\n for _in in include_pattern:\n include += _target.glob(_in) \n \n _target_path = set(allfiles) - set(exclude) | set(include)\n \n _target_dir_path = sorted(list(x for x in _target_path if x.is_dir() is True))\n _target_file_path = sorted(list(x for x in _target_path if x.is_file() is True))\n \n return _target_dir_path, _target_file_path", "def Sources():\n return _sources", "def get_source_paths():\r\n script_paths = set()\r\n try:\r\n script_paths.update(filter(None, os.environ.get(PYENV).split(os.pathsep)))\r\n script_paths.update(filter(None, os.environ.get(MELENV).split(os.pathsep)))\r\n except AttributeError:\r\n logger.debug('No custom environ variables set.')\r\n\r\n cwd = os.path.dirname(os.path.abspath(__file__))\r\n for each in os.listdir(cwd):\r\n path = os.path.join(cwd, each)\r\n if not os.path.isdir(path) or each.startswith(EXCLUDE_PATTERNS):\r\n continue\r\n script_paths.add(path)\r\n\r\n return script_paths", "def listdir(self, name, source, test_data=()):\n assert isinstance(source, config_types.Path)\n self.m.path.assert_absolute(source)\n result = self._run(\n name, ['listdir', source],\n lambda: self.test_api.listdir(test_data),\n self.m.raw_io.output_text())\n ret = [source.join(x) for x in result.stdout.splitlines()]\n result.presentation.logs['listdir'] = map(str, ret)\n return ret", "def getFiles(self):\n\t\treturn os.listdir(self.getPath())", "def get_dir_files(self, recursive=False):\n logging.info('Enumerating files under the source path (recursive=%s) ...', recursive)\n files = {}\n if not recursive:\n files[self.path_source] = [\n f for f in os.listdir(self.path_source) if os.path.isfile(os.path.join(self.path_source, f))\n ]\n else:\n for current_dir, sub_dirs, dir_files in os.walk(self.path_source):\n files[os.path.join(self.path_source, current_dir)] = [f for f in dir_files]\n\n return files", "def file_list(start_dir):\n file_list = []\n for root, dirs, files in os.walk(start_dir):\n for f in files:\n if f[0] != '.':\n file_list.append(f)\n return file_list", "def _get_pyfilelist(srcpath, usegitignore=True) -> list:\n gitignorefile = srcpath / Path(\".gitignore\")\n if usegitignore and gitignorefile.exists():\n with gitignorefile.open('r') as f:\n lines = f.read().splitlines()\n gitignore = [\n srcpath / Path(line)\n for line in lines\n if not line.strip().startswith(\"#\")\n and len(line.strip()) > 1\n and Path(line).suffix == \"\"\n ] + [srcpath / Path(\".git\")]\n viablepaths = [\n p for p in srcpath.glob(\"*/\") if p.is_dir() and p not in gitignore\n ]\n filelist = set().union(*[set(p.glob(\"**/*.py\")) for p in viablepaths])\n filelist = filelist.union(*[set(srcpath.glob('*.py'))])\n else:\n filelist = srcpath.glob(\"**/*.py\")\n return [p.relative_to(srcpath) for p in filelist]", "def list_files(startpath):\n for root, _, files in os.walk(startpath):\n for f in files:\n yield os.path.join(root, f)", "def list_sources(config, base_dir, verbose=False):\n for source in config.sources_under(abspath(base_dir)):\n if verbose:\n print(\"# %s (%s)\" % (source.nicedir, ' '.join(source.info)))\n else:\n print(source.nicedir)", "def filePaths(directory_with_files):\n\n # get a list of file names in directory\n list_of_files = os.listdir(directory_with_files) \n\n # join directory path and file name to get full paths to files\n filepaths = [os.path.join(directory_with_files, filename) for filename in list_of_files]\n\n return filepaths", "def files(self):\n self._printer('\\tFiles Walk')\n 
for directory in self.directory:\n for path in os.listdir(directory):\n full_path = os.path.join(directory, path)\n if os.path.isfile(full_path):\n if not path.startswith('.'):\n self.filepaths.append(full_path)\n return self._get_filepaths()", "def find_all_files(self):\n look4files = [ f for f in listdir(self.file_location) if isfile(join(self.file_location,f)) ]\n return look4files", "def collect_source_hpp_files(self):\n for pattern in self.package_info.source_hpp_patterns:\n for filename in fnmatch.filter(self.source_hpp_files, pattern):\n self.package_info.source_hpp_files.append(os.path.basename(filename))\n self.source_dirs.add(os.path.abspath(os.path.dirname(filename)))\n\n for root, _, filenames in os.walk(self.source_root, followlinks=True):\n for pattern in self.package_info.source_hpp_patterns:\n for filename in fnmatch.filter(filenames, pattern):\n if \"pybindx\" not in filename:\n self.package_info.source_hpp_files.append(os.path.join(root, filename))\n self.package_info.source_hpp_files = [path for path in self.package_info.source_hpp_files\n if self.wrapper_root not in path]", "def get_file_paths():\n audio_folder = MY_DIR + '/assets'\n\n audio_addy_list = []\n for file in os.scandir(audio_folder):\n audio_addy_list.append(file.path)\n\n return audio_addy_list", "def compile_files(root):\n files = [os.path.join(root, f) for f in os.listdir(root) if not f.startswith(\".\")]\n \n return files", "def get_file_list(path: str) -> list:\n\treturn [f for f in listdir(path) if isfile(join(path, f))]", "def get_paths(input_folder):\n list_files = []\n conll_folder = glob.glob(input_folder + '/*.json')\n \n for filename in conll_folder:\n list_files.append(filename)\n\n return list_files", "def getBaseSrcFile(self) -> List[int]:\n ...", "def filepaths(self, langs) -> Generator[str, None, None]:\n for pv in self.projects(langs):\n yield from pv.filepaths()", "def findFiles(target, path):\r\n\tfiles = []\r\n\tlyst = os.listdir(path)\r\n\tfor element in lyst:\r\n\t\tif os.path.isfile(element):\r\n\t\t\tif target in element:\r\n\t\t\t\tfiles.append(path + os.sep + element)\r\n\t\telse:\r\n\t\t\tos.chdir(element)\r\n\t\t\tfiles.extend(findFiles(target, os.getcwd()))\r\n\t\t\tos.chdir(\"..\")\r\n\treturn files", "def get_files_paths(self):\n return self.__files_paths", "def _get_target_files(self) -> List[Path]:\n repo = get_git_repo()\n submodules = repo.submodules # type: ignore\n submodule_paths = [\n self._fname_to_path(repo, submodule.path) for submodule in submodules\n ]\n\n # resolve given paths relative to current working directory\n paths = [p.resolve() for p in self._paths]\n if self._base_commit is not None:\n paths = [\n a\n for a in (self._status.added + self._status.modified)\n # diff_path is a subpath of some element of input_paths\n if any((a == path or path in a.parents) for path in paths)\n ]\n changed_count = len(paths)\n click.echo(f\"| looking at {unit_len(paths, 'changed path')}\", err=True)\n paths = [\n path\n for path in paths\n if all(\n submodule_path not in path.parents\n for submodule_path in submodule_paths\n )\n ]\n if len(paths) != changed_count:\n click.echo(\n f\"| skipping files in {unit_len(submodule_paths, 'submodule')}: \"\n + \", \".join(str(path) for path in submodule_paths),\n err=True,\n )\n\n # Filter out ignore rules, expand directories\n self._ignore_rules_file.seek(0)\n patterns = Parser(self._base_path).parse(self._ignore_rules_file)\n\n file_ignore = FileIgnore(\n base_path=self._base_path, patterns=patterns, target_paths=paths\n )\n\n 
walked_entries = list(file_ignore.entries())\n click.echo(\n f\"| found {unit_len(walked_entries, 'file')} in the paths to be scanned\",\n err=True,\n )\n filtered: List[Path] = []\n for elem in walked_entries:\n if elem.survives:\n filtered.append(elem.path)\n\n skipped_count = len(walked_entries) - len(filtered)\n if skipped_count:\n click.echo(\n f\"| skipping {unit_len(range(skipped_count), 'file')} based on path ignore rules\",\n err=True,\n )\n\n relative_paths = [path.relative_to(self._base_path) for path in filtered]\n\n return relative_paths", "def get_image_path(raw_input_dir: str) -> list:\n result = []\n for root, dirs, files in os.walk(raw_input_dir):\n for file in files:\n result.append(os.path.join(root, file))\n return result", "def files_in( d ):\n return [ join(d,f) for f in os.listdir(d) if isfile(join(d,f)) ]", "def filepaths(self):\n pass", "def get_sources(config, base_dir, exclude_submodules=False):\n for i, source in enumerate(config.sources_under(abspath(base_dir))):\n if i != 0:\n print\n log.info(\"# source %s (%s)\", source.nicedir,\n ' '.join(source.info))\n source.get(exclude_submodules)", "def get_files(self) -> list:\n files = []\n for file in os.listdir(self.root):\n if file.endswith(f\".{self.suffix}\"):\n files.append(os.path.join(self.root, file))\n return files", "def getContentFiles():\n contentFiles = []\n for contentDir, subDirs, filenames in os.walk(sourceDir, followlinks=True):\n if shouldIgnore(contentDir):\n subDirs[:] = []\n continue\n for filename in filenames:\n if not shouldIgnore(filename):\n cf = ContentFile(os.path.join(contentDir, filename))\n log(`cf.path`)\n contentFiles.append(cf)\n return contentFiles", "def _get_file_paths(self, ignored_exts: Optional[Set[str]]) -> List[str]:\n dir_path = os.path.join(self._target_dir, '**')\n all_paths = glob.glob(dir_path, recursive=True)\n if ignored_exts is None:\n return [p for p in all_paths if os.path.isfile(p)]\n file_paths = [p for p in all_paths if self._extr_ext(p) not in ignored_exts]\n return [p for p in file_paths if os.path.isfile(p)]", "def discover(self):\n ids = []\n for f in os.listdir(self.dirname):\n if self.file_prefix in f:\n ids.append(self.inv_filename(f))\n return sorted(ids)", "def files_in_dir(path):\n return os.listdir(path)", "def pdbfile_list():\n import glob, os\n os.chdir(\"../Data\")\n file_list = []\n for file in glob.glob(\"*.pdb\"):\n file_list.append(file)\n return file_list", "def glob_paths(self, name, source, pattern, test_data=()):\n assert isinstance(source, config_types.Path)\n result = self._run(\n name, ['glob', source, pattern],\n lambda: self.test_api.glob_paths(test_data),\n self.m.raw_io.output_text())\n ret = [source.join(*x.split(self.m.path.sep))\n for x in result.stdout.splitlines()]\n result.presentation.logs[\"glob\"] = map(str, ret)\n return ret", "def get_c_files(path):\n clist = []\n for file in os.listdir(path):\n if file.endswith(\".cc\") or file.endswith(\".c\"):\n clist.append(\"%s/%s\" % (path, file))\n return clist", "def get_all_fullpaths(self):\n files = []\n for mf in self.manifests:\n files.extend(self.manifests[mf].get_fullpaths())\n return files", "def get_my_files():\n return [file for file in os.listdir(os.getcwd()) if os.path.isfile(file)]", "def _get_paths():\n paths = [\n '/'\n ]\n return paths", "def _find_files(root_dir, should_include):\n paths = [] # Return value.\n\n is_module = lambda path: path.endswith(\".py\")\n\n # os.walk() is new in Python 2.3\n # http://docs.python.org/library/os.html#os.walk\n for dir_path, 
dir_names, file_names in os.walk(root_dir):\n new_paths = [os.path.join(dir_path, file_name) for file_name in file_names]\n new_paths = filter(is_module, new_paths)\n new_paths = filter(should_include, new_paths)\n paths.extend(new_paths)\n\n return paths", "def find_data_files_distutils(self, package, src_dir):\n from glob import glob\n import os\n from distutils.util import convert_path\n\n globs = (self.package_data.get('', [])\n + self.package_data.get(package, []))\n files = []\n for pattern in globs:\n # Each pattern has to be converted to a platform-specific path\n filelist = glob(os.path.join(src_dir, convert_path(pattern)))\n # Files that match more than one pattern are only added once\n files.extend([fn for fn in filelist if fn not in files\n and (os.path.isfile(fn) or os.path.islink(fn))])\n return files", "def list_of_files(path):\r\n files_list=[]\r\n path = os.path.abspath(path)\r\n\r\n #if the path is a file name, returns a list of a single file name\r\n if os.path.isfile(path):\r\n files_list.append(path)\r\n #if the path is a directory name, returns a list of all the file names anded with .asm\r\n else:\r\n for file in os.listdir(path):\r\n if file.endswith(\".asm\"):\r\n files_list.append(os.path.join(path, file))\r\n return files_list", "def files_in_dir(root_dir):\n file_set = set()\n\n for dir_, _, files in os.walk(root_dir):\n for file_name in files:\n rel_dir = os.path.relpath(dir_, root_dir)\n rel_file = os.path.join(rel_dir, file_name)\n file_set.add(rel_file)\n\n return [Path(PureWindowsPath(f)) for f in file_set]", "def _get_files_list(self):\n ts_filepaths = []\n conn_filepaths = []\n ts_filepaths_from_dir = sorted(os.listdir(self.ts_dir))\n conn_filepaths_from_dir = sorted(os.listdir(self.conn_dir))\n for sub_id in self.ids:\n for ts_file in ts_filepaths_from_dir:\n if sub_id in ts_file:\n ts_filepaths += [os.path.join(self.ts_dir, ts_file)]\n ts_filepaths_from_dir.remove(ts_file)\n break\n for conn_file in conn_filepaths_from_dir:\n if sub_id in conn_file:\n conn_filepaths += [os.path.join(self.conn_dir, conn_file)]\n conn_filepaths_from_dir.remove(conn_file)\n break\n\n return ts_filepaths, conn_filepaths", "def __get_sources__(self):\n\n # Let's go to the Apt temporal dir.\n os.chdir(self.conf['AptTmp'])\n\n # Define a global Source file, all the *_Sources files are going to be in this file.\n global_sources_file = open(self.conf['CodeName'] + '_Sources', 'w')\n\n\t\t# The main/debian-installer is in main, so remove it.\n\t\tcomponents = self.conf['Components']\n\t\tif 'main/debian-installer' in components:\n\t\t\tcomponents.remove('main/debian-installer')\n\n # For every component defined...\n for component in components:\n # Download the Packages.gz file\n file = self.__get_packages_file__(self.conf[\"Mirror\"], \\\n \"%s_%s_Sources\" % (self.conf['CodeName'], component), \\\n component, \"source\" + \"/Sources.gz\")\n\n # \"cat\" it into the global_packages_file\n for line in file:\n print >>global_sources_file, line,\n file.close()\n\n\t\tglobal_sources_file.close()\n\t\treturn open(self.conf['CodeName'] + '_Sources', 'r')", "def GetAllFilepaths(root_directory):\n path_list = []\n for dirpath, _, filenames in os.walk(root_directory):\n for filename in filenames:\n path_list.append(os.path.abspath(os.path.join(dirpath, filename)))\n return path_list", "def get_directories():\n # get current working dir\n directory = os.getcwd()\n # list of dir to look in repo for files\n directories = [\n directory,\n os.path.expanduser(os.path.join(directory, 'src')),\n 
os.path.expanduser(os.path.join(directory, 'tests'))\n ]\n return directories", "def get_file_list(start):\n valid_files = []\n for root, dirs, files in os.walk(start):\n for name in files:\n if name[-5:] == \".conf\":\n valid_files.append(os.path.join(root,name))\n return valid_files", "def files(self) -> Generator[Path, None, None]:\n return Path(self.package).resolve(strict=True).glob(self.glob)", "def get_target_files(self, src_dir, src_pattern):\n return File().get_target_files(src_dir, src_pattern)", "def fullpathlist(path):\n try:\n return [os.path.join(path, filename) for filename in os.listdir(path)]\n except OSError:\n return []", "def files(self):\r\n files = []\r\n for path in self.paths:\r\n if os.path.isdir(path):\r\n files.extend(glob.glob(os.path.join(path, f'*{self.ext}')))\r\n else:\r\n files.extend(glob.glob(path))\r\n return list(set(self.get_pattern(fname) for fname in files))", "def find_files(self, destination):\n for root, dirs, files in os.walk(destination):\n for f in files:\n if (f.endswith('.scss') or f.endswith('.sass')) and not f.startswith('_'):\n yield os.path.join(root, f)", "def list_filenames(self):\n l = []\n for path, dirs, files in os.walk(self.archive_path):\n for file in files:\n l.append(os.path.relpath(os.path.join(path,file),self.archive_path))\n l.sort()\n return l", "def pdbfile_list():\n \n import glob, os\n os.chdir(\"../Data\")\n file_list = []\n for file in glob.glob(\"*.pdb\"):\n file_list.append(file)\n return file_list", "def get_files(self) -> tp.Iterable[str]:\n return os.listdir(self.path)", "def _get_sources_and_includes(dirs_list):\n sources = []\n includes = []\n for directory in dirs_list:\n source_dir = join(os.path.dirname(os.path.realpath(__file__)),\n directory, 'src')\n for source in os.listdir(source_dir):\n sources.append(join(source_dir, source))\n includes.append(join(directory, 'include'))\n return sources, includes", "def get_req_file_list_pvd(source_pvd):\n flist = []\n flist.append(source_pvd)\n\n src_dir = os.path.dirname(source_pvd)\n source_tree = ET.parse(source_pvd)\n elems = source_tree.findall('.//DataSet')\n for e in elems:\n subfile = e.get('file')\n sub_fullpath = os.path.join(src_dir, subfile)\n sub_full_dir = os.path.dirname(sub_fullpath)\n flist.append(sub_fullpath)\n sub_tree = ET.parse(sub_fullpath)\n subelems = sub_tree.findall('.//Piece')\n for e2 in subelems:\n leaf = e2.get('Source')\n leaf_path = os.path.join(sub_full_dir, leaf)\n flist.append(leaf_path)\n return flist", "def GetSrc():\n return os.path.abspath(os.path.join(_THIS_DIR, os.pardir, os.pardir,\n os.pardir))", "def source_list(self):\n return self._source_list", "def source_list(self):\n return self._source_list", "def local_paths(self) -> List[Path]:\n return self._local_paths", "def orig_filepath_list(filename_list, src_path):\n orig_filepaths = list([])\n i = 0\n for filename in filename_list:\n orig_filepaths.append(src_path + filename_list[i])\n i += 1\n return orig_filepaths", "def BuildOldFilesList(self, source_file_name):\n default_source = os.path.join(self._tool_dir, source_file_name)\n if os.path.exists(default_source):\n file_list = open(default_source).readlines()\n else:\n print 'WARNING: No default %s list found at %s' % (source_file_name,\n default_source)\n file_list = []\n file_list = [f.strip() for f in file_list]\n file_list.extend(self.GetExtraFiles(self.options.extra_archive_paths,\n source_file_name))\n file_list = archive_utils.ExpandWildcards(self._build_dir, file_list)\n return file_list", "def 
get_replay_source_helper_paths(self):\n\n if self.replay_source is None:\n return None\n\n paths = []\n classes = self.get_helpers_classes()\n\n base_path = self.base_folder_path + \"/data/replay_images/\" + self.replay_source + \"/\"\n for hc in classes:\n current_paths = []\n for c in hc:\n path = base_path + str(c).zfill(2) + \".tfrecord\"\n current_paths.append(path)\n paths.append(current_paths)\n return paths", "def path_generator(initial_root):\n for root, dirs, files in os.walk(initial_root):\n paths = [os.path.join(root, name) for name in files]\n return paths", "def get_replay_source_no_helper_paths(self):\n paths = []\n classes = self.get_replay_classes_no_helper()\n base_path = self.base_folder_path + \"/data/replay_images/\" + self.replay_source + \"/\"\n\n \n for c in classes:\n full_path = base_path + str(c).zfill(2) + \".tfrecord\"\n paths.append(full_path)\n \n return paths", "def sources(self):\n return self._sources.keys()", "def expand_files(self, recursive=True, include_buildfile=True):\r\n\r\n files = []\r\n\r\n def _expand(target):\r\n files.extend([os.path.abspath(os.path.join(target.target_base, s))\r\n for s in (target.sources or [])])\r\n if include_buildfile:\r\n files.append(target.address.buildfile.full_path)\r\n if recursive:\r\n for dep in target.dependencies:\r\n if isinstance(dep, TargetWithSources):\r\n _expand(dep)\r\n elif hasattr(dep, 'address'):\r\n # Don't know what it is, but we'll include the BUILD file to be paranoid\r\n files.append(dep.address.buildfile.full_path)\r\n\r\n _expand(self)\r\n return files", "def _get_code_files(self):\n for dirpath, dirnames, filenames in os.walk(self.CodesDirectory):\n for f in filenames:\n rel_name = path.join(dirpath, f)\n if f.endswith('.py'):\n yield (rel_name, 'Python')\n elif f.endswith('.pyx'):\n yield (rel_name, 'PyRex')\n elif f.endswith('.c'):\n yield (rel_name, 'C')\n else:\n pass", "def buildListOfFiles(searchGlob):\n return [fpath for fpath in glob2.iglob(searchGlob) if os.path.isfile(fpath)]", "def FindSources(env, dest, source, suffixes=None):\n for source_entry in env.Flatten(source):\n if type(source_entry) == str:\n # Search for matches for each source entry\n source_nodes = env.Glob(source_entry)\n else:\n # Source entry is already a file or directory node; no need to glob it\n source_nodes = [source_entry]\n for s in source_nodes:\n if str(s.__class__) == 'SCons.Node.FS.Dir':\n # Recursively search subdir. Since glob('*') doesn't match dot files,\n # also glob('.*').\n FindSources(env, dest, [s.abspath + '/*', s.abspath + '/.*'],\n suffixes)\n elif suffixes and s.suffix in suffixes:\n dest.add(s)", "def _deleted_sources(self):\r\n # We compute the list lazily.\r\n if self._lazy_deleted_sources is None:\r\n with self.context.new_workunit('find-deleted-sources'):\r\n if os.path.exists(self._analysis_file):\r\n products = self._analysis_parser.parse_products_from_path(self._analysis_file)\r\n buildroot = get_buildroot()\r\n old_sources = products.keys() # Absolute paths.\r\n self._lazy_deleted_sources = [os.path.relpath(src, buildroot) for src in old_sources\r\n if not os.path.exists(src)]\r\n else:\r\n self._lazy_deleted_sources = []\r\n return self._lazy_deleted_sources" ]
[ "0.8271542", "0.77901995", "0.75900644", "0.73890495", "0.7383353", "0.7309688", "0.7303101", "0.7151451", "0.7086724", "0.70593715", "0.7048522", "0.704018", "0.6996113", "0.69825387", "0.69230485", "0.68788785", "0.6873243", "0.6849214", "0.6813808", "0.68076485", "0.67865753", "0.67446584", "0.67353475", "0.67315847", "0.672391", "0.6723616", "0.67073435", "0.66992784", "0.6699192", "0.66971093", "0.6687404", "0.6641875", "0.66319954", "0.6610068", "0.6607285", "0.65586054", "0.65551114", "0.65456426", "0.65455806", "0.65364087", "0.6526681", "0.6524498", "0.6515302", "0.6507937", "0.6500629", "0.64796484", "0.6460579", "0.6449892", "0.64361084", "0.64217216", "0.64203876", "0.6419934", "0.64143556", "0.6400525", "0.6393577", "0.6379879", "0.6363405", "0.6350611", "0.63483965", "0.6330859", "0.63256955", "0.6315554", "0.6292819", "0.6286555", "0.62861", "0.6281244", "0.6279932", "0.6274722", "0.62700504", "0.626975", "0.6269229", "0.6265977", "0.626078", "0.6259138", "0.6256535", "0.62465227", "0.62461704", "0.6240754", "0.6239586", "0.62348086", "0.6231159", "0.62250596", "0.6219199", "0.61955065", "0.6193373", "0.6181007", "0.61806357", "0.61806357", "0.61693233", "0.61684054", "0.6168327", "0.6165969", "0.6165242", "0.616347", "0.61632043", "0.61546636", "0.61521316", "0.61488765", "0.6146249", "0.61431843" ]
0.6541307
39
Return the list of full paths for pictures and movies in source directory
def list_of_medias(args, sourcedir, recursive): files = list_of_files(sourcedir, recursive) return [_ for _ in files if is_media_within_dates(_, args.dates)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __return_movie_file_list(self, movie_path):\n movie_dir = movie_path.rsplit(\"/\",1)[0]\n movie_file_list =[]\n movie_extentionds = self.__movie_file_extensions(self.__file_extentions)\n for x in os.listdir(movie_dir):\n if x.rsplit(\".\",1)[-1]in movie_extentionds:\n movie_file_list.append(movie_dir+\"/\"+x)\t\t\n\t#USUNAC URL Z NAPISY24\n return movie_file_list", "def get_replay_source_helper_paths(self):\n\n if self.replay_source is None:\n return None\n\n paths = []\n classes = self.get_helpers_classes()\n\n base_path = self.base_folder_path + \"/data/replay_images/\" + self.replay_source + \"/\"\n for hc in classes:\n current_paths = []\n for c in hc:\n path = base_path + str(c).zfill(2) + \".tfrecord\"\n current_paths.append(path)\n paths.append(current_paths)\n return paths", "def get_replay_source_no_helper_paths(self):\n paths = []\n classes = self.get_replay_classes_no_helper()\n base_path = self.base_folder_path + \"/data/replay_images/\" + self.replay_source + \"/\"\n\n \n for c in classes:\n full_path = base_path + str(c).zfill(2) + \".tfrecord\"\n paths.append(full_path)\n \n return paths", "def readPlayerImageFiles(self):\n currentPath = os.path.dirname(os.path.abspath(__file__))\n listOfFileNames=[]\n for i in os.listdir(currentPath):\n if re.match(\"player\\_\\d+\",i): #i.endswith(\".gif\")\n listOfFileNames.append(currentPath+'/'+i)\n return listOfFileNames", "def get_all_paths(why = 'train'):\r\n if why == 'train':\r\n parent_folder = train_parent_folder\r\n if why == 'test':\r\n parent_folder = test_test_folder\r\n sub_folders = glob.glob(parent_folder) # Directories of all languages\r\n image_paths = [glob.glob(sub_folder + '\\*') for sub_folder in sub_folders] # Directories of all characters\r\n image_paths = sum(image_paths, []) # Flatten out the 2D list to a 1D list \r\n return image_paths", "def get_image_path(raw_input_dir: str) -> list:\n result = []\n for root, dirs, files in os.walk(raw_input_dir):\n for file in files:\n result.append(os.path.join(root, file))\n return result", "def get_all_image_paths(self):\n image_paths, image_labels = [], []\n for directory_name, subdirectory_list, file_list in os.walk(self.root_directory):\n for file_name in file_list:\n if file_name.endswith(('.jpg',)):\n image_paths.append(os.path.join(directory_name, file_name))\n # Translates labels to 0-26 as recommended in the exercise description\n image_labels.append(ord(directory_name[-1]) - 97)\n return image_paths, image_labels", "def find_photos(source_path, common_extensions=('JPG', 'CR2', 'ORF', 'ARW', 'TIFF', 'DNG'), ignore=[]):\n # combinedignored = re.compile('|'.join('(?:{0})'.format(x) for x in ignore))\n # use endswith , ignore must be a tuple then\n # if ignore and dirpath.endswith(ignore):\n # for duplication, at the end cll the same funciton\n\n source_files = list()\n\n for (dirpath, dirnames, filenames) in os.walk(source_path):\n for f in filenames:\n if f.upper().endswith(common_extensions):\n # source_files.append(os.path.join(dirpath, f))\n parent = os.path.basename(os.path.normpath(dirpath))\n source_files.append({'dir':dirpath,\n 'filename':f,\n 'parent_folder':parent})\n\n return source_files", "def get_file_paths():\n audio_folder = MY_DIR + '/assets'\n\n audio_addy_list = []\n for file in os.scandir(audio_folder):\n audio_addy_list.append(file.path)\n\n return audio_addy_list", "def get_movies(path):\n movies_list = []\n \n \n for f in os.listdir(path):\n \n full_file_path = join(path,f)\n if isdir(full_file_path):\n \n movies_list.extend( 
get_movies(full_file_path) )\n \n elif isfile(full_file_path) and full_file_path[-3:] in util.extension:\n m = Movie(f, full_file_path)\n movies_list.append(m)\n \n return movies_list", "def getSourcePaths(self, makeGlyphs=True, makeKerning=True, makeInfo=True):\n paths = []\n for name in self.sources.keys():\n paths.append(self.sources[name][0].path)\n return paths", "def list_of_medias_ext(args, sourcedir):\n result = list()\n listdir = sorted_listdir(os.listdir(sourcedir))\n if '.nomedia' not in listdir:\n for basename in listdir:\n fullname = os.path.join(sourcedir, basename)\n if os.path.isdir(fullname) and basename != '$RECYCLE.BIN' and contains_media(args, fullname):\n result.append(fullname)\n else:\n if is_media_within_dates(fullname, args.dates):\n result.append(fullname)\n return result", "def get_img_files(images, db):\n img_dir = db.source\n if img_dir == None:\n raise ValueError('Cannot locate file without a base path. This method looks for it at \\\n db.source, which is not set. This should be set by the loader during DB construction!')\n img_dir = path.join(img_dir, 'img') \n locs = db.get_img_locs(images)\n titles = db.get_location_titles()\n returnval = []\n for image in images:\n loc = locs[image]\n if loc is None:\n raise ValueError('The image %s could not be found' % image)\n returnval.append(path.join(img_dir, titles[loc], str(image) + '.jpg'))\n return returnval", "def getFilePaths():\n \n image_dir = r'/hpc/wfok007/mpi_heart/Training Set'\n mask_paths = []\n image_paths = []\n for root, dirs, files in os.walk(image_dir, topdown=False):\n for name in files:\n if name == 'laendo.nrrd':\n mask_paths.append(os.path.join(root, name))\n elif name == 'lgemri.nrrd':\n image_paths.append(os.path.join(root, name))\n else:\n print ('%s is unknown' %name)\n return mask_paths, image_paths", "def list_of_files(sourcedir, recursive):\n result = list()\n if recursive is False:\n listdir = sorted_listdir(os.listdir(sourcedir))\n if '.nomedia' not in listdir:\n for basename in listdir:\n result.append(os.path.join(sourcedir, basename))\n else:\n for root, dirs, files in os.walk(sourcedir):\n if '.nomedia' not in files:\n for basename in sorted_listdir(files):\n result.append(os.path.join(root, basename))\n return result", "def get_paths_list_from_folder(folder):\n names = os.listdir(folder)\n relative_paths = [os.path.join(folder, image_name) for image_name in names]\n return relative_paths", "def voicesPathsCollector(source_dir, gender_ids):\r\n # The list containing the IDs of the speakers which are the folder names\r\n person_ids_list = gender_ids\r\n\r\n # An empty list which it will contains voices paths which are in the section folders\r\n voices_paths_list = []\r\n\r\n # Iterating over a person speaker ids\r\n for person_id_idx in range(len(person_ids_list)):\r\n # Speaker path containing the chapter's directories\r\n speaker_path = Path.cwd().joinpath(source_dir, person_ids_list[person_id_idx])\r\n # The list containing the IDs of the sections by the current speaker which are the folder names\r\n sections_ids_list = os.listdir(speaker_path)\r\n\r\n # Iterating over a sections ids by the current speaker\r\n for sec_id_idx in range(len(sections_ids_list)):\r\n # Section path containing files\r\n section_path = Path.cwd().joinpath(speaker_path,sections_ids_list[sec_id_idx])\r\n # Iterating over files which are existing in section's folders\r\n for file in os.listdir(section_path):\r\n # Choose files with .flac extension\r\n if file.endswith(\".flac\"):\r\n\r\n voice_path = 
Path.cwd().joinpath(section_path, file)\r\n voices_paths_list.append(voice_path)\r\n\r\n return voices_paths_list", "def get_image_list(path: str) -> list:\n\n return list(os.path.join(path, f)\n for f in os.listdir(path)\n if f.endswith('.jpg'))", "def get_datapaths(input_dir):\n image_paths = []\n assert os.path.isdir(input_dir), f\"{input_dir} is not existed\"\n\n for root, _, names in os.walk(input_dir):\n for name in names:\n path = os.path.join(root, name)\n image_paths.append(path)\n return image_paths", "def getImages(path):\n files = list()\n\n for f in listdir(path):\n file = join(path, f)\n if isfile(file):\n files.append(getImage(file))\n\n return files", "def udimPaths(self):\n\t\ttry:\n\t\t\treturn [ textureFile( self.dirPath + a ) for a in os.listdir( self.dirPath ) if self.name + '.' in a ]\n\t\texcept:\n\t\t\treturn []", "def get_images_paths(path: str) -> List[str]:\n\n image_paths = []\n\n for folder in os.listdir(path):\n for file in os.listdir(os.path.join(f\"{path}/{folder}\", \"images\")):\n image_paths.append(f\"{path}/{folder}/images/{file}\")\n\n return image_paths", "def get_imlist(path):\n\treturn [os.path.join( path, f) for f in os.listdir(path) if f.endswith('.jpg')]", "def get_library_content(self):\n from glob import glob\n try:\n os.path.isdir(self.source)\n lst = glob(self.source + '/*')\n except TypeError:\n lst = self.source\n dircheck = True\n while dircheck is True:\n dircheck = False\n newlst = []\n for entry in lst:\n if os.path.isdir(entry):\n newlst.extend(glob(entry + '/*'))\n dircheck = True\n else:\n newlst.append(entry)\n lst = newlst\n return lst", "def return_images(directory):\r\n allfiles = os.listdir(directory)\r\n image_list = [im for im in allfiles if '.jpg' in str(im)]\r\n image_list = [directory + im for im in image_list]\r\n return image_list", "def get_source_files(self):\n return [\n path.as_posix()\n for path in _Path(self.src_dir).rglob(\"*\")\n if not path.is_dir()\n ] + [\n (path / \"CMakeLists.txt\").as_posix()\n for path in _PurePath(self.src_dir).parents\n ]", "def source_paths(self):\n paths = self.config.get('static_dirs')\n if paths:\n return paths\n return [self.config.get('static_dir')]", "def Get_Source(self):\r\n source_directory = filedialog.askdirectory(initialdir = initial_dir, title=\"Select source folder\")\r\n global source\r\n source = source_directory \r\n self.txt_srcPath.delete(0, 'end')\r\n self.txt_srcPath.insert(0, str(source_directory))\r\n file_type = self.txt_fileType.get()\r\n hrs= int(self.txt_fileAge.get())\r\n file_age = datetime.timedelta(hours=hrs)\r\n global filenames_list\r\n filenames_list = []\r\n global files_list\r\n files_list = []\r\n global ft_list\r\n ft_list = []\r\n for f in glob.iglob(os.path.join(source, file_type)):\r\n files_list = [os.path.splitext(f)[0]]\r\n filenames_list.append(f) \r\n return(filenames_list, source, files_list)", "def darkfiles(cam):\n return fullpathlist(darkpath(cam))", "def get_imlist(path):\n return [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.jpg')]", "def get_image_list(source_dir):\n\n dir_list = os.path.os.listdir(source_dir)\n# print(dir_list)\n image_list = []\n os.chdir(source_dir)\n for file in dir_list:\n print(\"Inspecting.... 
: {}\".format(file))\n\n try:\n if Image.open(file).format:\n image_list.append(file)\n print(\"{} : is an image\".format(file))\n except Exception as e:\n print(\"{} : failed the imageness test.i \\n {}\".format(file, e))\n continue\n\n# print(image_list)\n return image_list", "def get_filepaths(extract_dir):\n\n index = []\n labels = []\n _extract_dir = os.path.join(extract_dir, 'UCF-101')\n for folder in os.listdir(_extract_dir):\n labels.append(folder)\n folderpath = os.path.join(_extract_dir, folder)\n\n if not os.path.isdir(folderpath):\n continue\n\n for filename in os.listdir(folderpath):\n if 'avi' not in filename:\n continue\n\n if filename[0] == '.':\n continue\n\n filepath = os.path.join(folderpath, filename)\n\n if os.path.exists(filepath):\n index.append(filepath)\n else:\n print(filepath)\n return index, labels", "def getVideosPath(self):\r\n videoTypes = [\r\n ('MP4 files', '*.mp4'),\r\n ('3GP files', '*.3gp'),\r\n ('WMV files', '*.wmv'),\r\n ('FLV files', '*.flv'),\r\n ('AVI files', '*.avi'),\r\n ]\r\n global filenames\r\n filenames = askopenfilenames(title=\"Select video files\", multiple=True, )", "def get_files():\n\n img_dir = '../ADE20K_2016_07_26/full_data/images/validation/'\n sem_dir = '../ADE20K_2016_07_26/full_data/annotations/validation/'\n ins_dir = '../ADE20K_2016_07_26/full_data/annotations_instance/validation/'\n\n img_files = os.listdir(img_dir)\n sem_files = os.listdir(sem_dir)\n ins_files = os.listdir(ins_dir)\n \n img_files = [ os.path.join(img_dir,item) for item in img_files ]\n sem_files = [ os.path.join(sem_dir,item) for item in sem_files ]\n ins_files = [ os.path.join(ins_dir,item) for item in ins_files ]\n \n img_files.sort()\n sem_files.sort()\n ins_files.sort()\n \n return img_files, sem_files, ins_files", "def demo_paths(self):\n base_path = os.path.join(self.module.__path__[0], 'demo')\n paths = []\n if os.path.isdir(base_path):\n for item in os.listdir(base_path):\n # TODO: support examples which is not auto-loaded\n if not os.path.isdir(os.path.join(base_path, 'examples')):\n paths.append(os.path.join(base_path, item))\n return paths", "def _locate_images(self):\r\n extensions = '|'.join(self.valid_extensions)\r\n extension_re = re.compile('.+\\.(%s)$' % extensions, re.IGNORECASE)\r\n files = sorted(os.listdir(self.path))\r\n\r\n images = []\r\n for root, dirs, files in os.walk(self.path, followlinks=self.config['follow_links']):\r\n for filename in sorted(files):\r\n if not filename.startswith('.') and extension_re.match(filename):\r\n images.append(Image(path=os.path.join(root, filename), config=self.config))\r\n if not self.config['recursive']:\r\n break\r\n\r\n if not images:\r\n raise SourceImagesNotFoundError(self.path)\r\n\r\n images = sorted(images, reverse=self.config['algorithm_ordering'][0] != '-')\r\n\r\n return images", "def get_all_sources(remit):\n if remit == 'panzer' or remit == 'pandoc':\n os.chdir('source-'+remit)\n sourcelist = [name for name in os.listdir(\".\") if os.path.isdir(name)]\n os.chdir('..')\n else:\n # get the maximal list of sources for a diff\n pandoc_list = get_all_sources('pandoc')\n panzer_list = get_all_sources('panzer')\n sourcelist = list(set(pandoc_list+panzer_list))\n sourcelist.sort()\n return sourcelist", "def collect_train_paths(self):\n\n image_paths = []\n annotation_paths = []\n\n n_images = 10000\n for i in range(1, n_images + 1):\n added = False\n for extension in ['jpg', 'png']:\n image_path = os.path.join(self.folder,\n f'ImagesPart{(i - 1) // 5000 + 1}',\n f'tr_img_{i:05}.{extension}')\n if 
os.path.exists(image_path):\n image_paths.append(image_path)\n added = True\n break\n if added:\n annotation_paths.append(\n os.path.join(self.folder, 'train_gt_t13', f'tr_img_{i:05}.txt')\n )\n else:\n print(f'Could not find: {image_path[:-3]}*')\n\n return image_paths, annotation_paths", "def get_image_path(source_path):\n\n split = source_path.split('\\\\')\n # get filename\n filename = split[-1].lstrip()\n # get folder name\n folder = split[-3]\n # get full data path\n current_path = folder + '/IMG/' + filename\n return current_path", "def get_feature_paths(start_dir, extensions = ['dcm']):\n if start_dir is None:\n start_dir = os.getcwd()\n img_paths = []\n for roots,dirs,files in os.walk(start_dir):\n for name in files:\n for e in extensions:\n if name.endswith('.' + e):\n img_paths.append(roots + '/' + name)\n img_paths.sort()\n return img_paths", "def get_filepaths(self):\n image_filepaths = set()\n for one_dir in self.dirs:\n for root, dirnames, filenames in os.walk(one_dir):\n for filename in filenames:\n if re.search(r\"\\.(jpg|jpeg|png|bmp|tiff)$\", filename):\n image_filepaths.add(os.path.join(root, filename))\n image_filepaths = sorted(list(image_filepaths))\n return image_filepaths", "def images_path(self):\n return os.path.join(self.extracted_path, \"images\")", "def get_subdirs(src_dir):\n img_dirs = sorted(next(os.walk(src_dir))[1])\n subdirs = [src_dir + img_dir for img_dir in img_dirs]\n return subdirs", "def get_lst_images(file_path):\n return [i for i in os.listdir(file_path) if i != '.DS_Store']", "def listdir(self, name, source, test_data=()):\n assert isinstance(source, config_types.Path)\n self.m.path.assert_absolute(source)\n result = self._run(\n name, ['listdir', source],\n lambda: self.test_api.listdir(test_data),\n self.m.raw_io.output_text())\n ret = [source.join(x) for x in result.stdout.splitlines()]\n result.presentation.logs['listdir'] = map(str, ret)\n return ret", "def source_list(self):\n return [g[\"name\"] for g in self._galleries]", "def collect_train_paths(self):\n\n image_paths = []\n annotation_paths = []\n n_images = 7200\n for i in tqdm(range(1, n_images + 1)):\n added = False\n for extension in ['jpg', 'png']:\n image_path = os.path.join(self.folder,\n f'ch8_training_images_{(i - 1) // 1000 + 1}',\n f'img_{i}.{extension}')\n if os.path.exists(image_path):\n image_paths.append(image_path)\n added = True\n break\n if added:\n annotation_paths.append(\n os.path.join(self.folder, 'ch8_training_localization_transcription_gt_v2',\n f'gt_img_{i}.txt')\n )\n else:\n logging.warning(f'Could not find: {image_path[:-3]}*')\n return image_paths, annotation_paths", "def sources_absolute_paths(self):\r\n abs_target_base = os.path.join(get_buildroot(), self.target_base)\r\n for src in self.sources:\r\n yield os.path.join(abs_target_base, src)", "def all_image_paths(self):\n self.labels = [i for i in (self.get_immediate_subdirectories(self.root_dir))\n if not i.startswith('.')]\n\n for root, subFolders, files in os.walk(self.root_dir):\n files = [i for i in files if not i.startswith('.')]\n files = files[:self.img_num] # hard coded - will not read in\n for i in files:\n self.all_files.append(os.path.abspath(root) + '/'.join(subFolders) + '/' + i)", "def filepaths(self):\n pass", "def full_path(startPath,files):\n\n files = list_strings(files)\n base = os.path.split(startPath)[0]\n return [ os.path.join(base,f) for f in files ]", "def relative_to_buildroot(self):\n return [os.path.join(self.rel_path, source) for source in self.source_paths]", "def 
find_candidate_images(images_path):\n images = []\n for root, dirs, files in os.walk(images_path):\n for name in files:\n file_path = os.path.abspath(os.path.join(root, name))\n if (os.path.splitext(name)[1]).lower() in [\".jpg\", \".png\", \".jpeg\"]:\n images.append(file_path)\n return images", "def CollectImageFilenames(self):\n # Match all image extensions but not the filenmae of the of beamer pdf\n regex_img = re.compile(\n r'^(?!{}).*\\.(jpg|png|pdf)'.format(self._filename.replace('.tex', '')))\n # regex_img = re.compile(r'^(?!test)'.format(self._filename.replace('.tex', '')))\n files = [f for f in os.listdir(os.getcwd())\n if regex_img.search(f)]\n return files", "def prep_files(app):\n smali_paths = []\n start = time.time()\n \n for root, dirs, files in os.walk(app, topdown=False):\n for name in files:\n if name[-6:] == \".smali\":\n smali_paths.append(str(os.path.join(root, name)))\n \n return smali_paths", "def flatfiles(cam):\n return fullpathlist(flatpath(cam))", "def get_path_names(directory):\n paths_without_source = set()\n paths = glob.glob(source + \"**/*.*\", recursive=True)\n for p in paths:\n paths_without_source.add(p.replace(directory, \"\", 1))\n\n return paths_without_source", "def get_image_paths(self):\n return self.image_paths", "def get_image_paths(self):\n return self.image_paths", "def get_dir_and_file_list(path):\r\n dList = os.listdir(path)\r\n dirList = []\r\n fileList = []\r\n\r\n for item in dList:\r\n \r\n if os.path.isdir(os.path.join(path, item)):\r\n dirList.append(item)\r\n elif os.path.isfile(os.path.join(path, item)):\r\n if any(image_type in item.lower() for image_type in image_types):\r\n preview = image_preview(os.path.join(path, item))\r\n fileList.append((item, preview))\r\n else:\r\n fileList.append((item, None))\r\n\r\n return dirList, fileList", "def _get_file_paths(self):\n return [os.path.join(self.path, self.mode, 'waveforms', file_name + '.npy') for file_name in self.file_names]", "def collect_project_source_files():\n source_files = glob.glob(PROJECT_SOURCE_FILES_FOLDER + '/**/*.py', recursive=True)\n # Insert root main.py at the beginning.\n source_files.insert(0, os.path.join(PROJECT_ROOT_FOLDER, 'main.py'))\n return list(map(lambda path: posixpath.join(*path.split('\\\\')), source_files))", "def findpaths(path):\n print('[INFO] Searching for .png images in ', path)\n frame_paths = []\n frame_to_path_dict = {}\n path_to_frame_dict = {}\n for root, dirs, files in os.walk(path, topdown=False):\n for name in files:\n if name.find('.png') != -1:\n frame_path = os.path.join(root, name)\n # NOTE: may want to change to deal with generic file names\n match = re.search(r'(?P<video_id>\\d+)_(?P<frame_id>\\d+).png', name)\n # video_id = int(match.group('video_id'))\n frame_id = int(match.group('frame_id'))\n frame_paths.append(frame_path)\n frame_to_path_dict[frame_id] = frame_path\n path_to_frame_dict[frame_path] = frame_id\n frame_paths_sorted = sorted(frame_paths, key=lambda x: int(path_to_frame_dict[x]))\n print('[INFO] %i frames located ' % (len(frame_paths)))\n return frame_paths_sorted, frame_to_path_dict, path_to_frame_dict", "def scandir(path_):\n return os.listdir", "def get_paths(input_folder):\n list_files = []\n conll_folder = glob.glob(input_folder + '/*.json')\n \n for filename in conll_folder:\n list_files.append(filename)\n\n return list_files", "def list_sources(config, base_dir, verbose=False):\n for source in config.sources_under(abspath(base_dir)):\n if verbose:\n print(\"# %s (%s)\" % (source.nicedir, ' 
'.join(source.info)))\n else:\n print(source.nicedir)", "def get_lists_in_dir(dir_path):\n image_list = []\n\n for filename in glob.glob(dir_path + '/*.jpg'):\n image_list.append(filename)\n return image_list", "def list_dir(self, path):", "def load_video_paths(dir):\n VIDEO_EXTENSIONS = ['.mov', '.MOV', '.mp4']\n video_paths = []\n\n # traverse directory to obtain only paths to videos\n for dir_name, _, paths in sorted(os.walk(os.path.expanduser(dir))):\n for path in paths:\n if any(path.endswith(extensions) for extensions in VIDEO_EXTENSIONS):\n video_paths.append(os.path.expanduser(dir_name + '/' + path))\n\n return video_paths", "def parse_dir_imgs(root_pth):\n def visit(imgpths, pth, names):\n # Appends detected image filenames to a list.\n imgpths.extend([os.path.join(pth, name) for name in names\n if os.path.splitext(name)[1].lower() in img_exts])\n # Walk down directory tree and get the image file paths\n imgpaths = []\n for dp, foo, names in os.walk(root_pth):\n visit(imgpaths, dp, names)\n # Make lowercased list of imagefilenames\n imgnames = [os.path.split(pth)[1].lower() for pth in imgpaths]\n return imgnames, imgpaths", "def collect_image_files():\n negs = [] # Non image files found\n for filename in os.listdir('.'):\n if filename.lower().endswith('.jpg') or filename.lower().\\\n endswith('.jpeg'):\n jpg_files.append(filename)\n elif filename.lower().endswith('.gif'):\n gif_files.append(filename)\n elif filename.lower().endswith('.png'):\n png_files.append(filename)\n else:\n negs.append(filename)\n return negs", "def get_file_list() -> List[str]:\n filenames = []\n os.makedirs(\"sequence\", exist_ok=True)\n for file in glob.glob(\"sequence/*.smp\"):\n filenames.append(file.replace(\"sequence/\", \"\"))\n return filenames", "def _get_paths():\n paths = [\n '/'\n ]\n return paths", "def Sources():\n return _sources", "def GetSongFilenames():\n\n\t## Loop through each directory\n\tsong_files = []\n\tfor root, dirs, fnames in os.walk(\"_data\\\\fma_small\\\\\"):\n\t\t\n\t\t## Skip the first level\n\t\tif root == \"_data\\\\fma_small\\\\\":\n\t\t\tcontinue\n\n\t\t## Otherwise collect the files, appending\n\t\t## the root path.\n\t\tsong_files += [root+\"\\\\\"+f for f in fnames]\n\n\treturn song_files", "def get_images(fish):\n fish_dir = TRAIN_DIR+'{}'.format(fish)\n images = [fish+'/'+im for im in os.listdir(fish_dir)]\n return images", "def movie_path_list_fixture(tmpdir_factory):\n path_list = []\n\n parent_tmpdir = tmpdir_factory.mktemp(\"movies_for_test\")\n rng = np.random.default_rng(172312)\n this_dir = tempfile.mkdtemp(dir=parent_tmpdir)\n this_path = tempfile.mkstemp(dir=this_dir, suffix=\".h5\")[1]\n with h5py.File(this_path, \"w\") as out_file:\n out_file.create_dataset(\"data\", data=rng.random((12, 512, 512)))\n\n path_list.append(this_path)\n\n this_dir = tempfile.mkdtemp(dir=parent_tmpdir)\n this_dir = pathlib.Path(this_dir) / \"processed\"\n this_dir.mkdir()\n this_path = this_dir / \"concat_31Hz_0.h5\"\n with h5py.File(this_path, \"w\") as out_file:\n out_file.create_dataset(\"data\", data=rng.random((12, 512, 512)))\n path_list.append(str(this_path.resolve().absolute()))\n\n this_dir = tempfile.mkdtemp(dir=parent_tmpdir)\n this_dir = pathlib.Path(this_dir) / \"processed\"\n this_dir.mkdir()\n this_path = this_dir / \"motion_corrected_video.h5\"\n with h5py.File(this_path, \"w\") as out_file:\n out_file.create_dataset(\"data\", data=rng.random((12, 512, 512)))\n path_list.append(str(this_path.resolve().absolute()))\n\n yield path_list\n\n for this_path in 
path_list:\n this_path = pathlib.Path(this_path)\n if this_path.is_file():\n this_path.unlink()", "def get_all_images_from_filesystem():\r\n\r\n logging.debug('get_all_images_from_filesystem()')\r\n\r\n dir_path = os.path.join(os.environ['TEMP'],'WarietyWallpaperImages')\r\n all_full_image_paths = []\r\n for my_file in os.listdir(dir_path):\r\n if os.path.isfile(os.path.join(dir_path, my_file)):\r\n all_full_image_paths.append(os.path.join(dir_path, my_file))\r\n return all_full_image_paths", "def getFiles(self):\n\t\treturn os.listdir(self.getPath())", "def files_in( d ):\n return [ join(d,f) for f in os.listdir(d) if isfile(join(d,f)) ]", "def search_images(\n current_dir: str,\n exts={\"jpg\", \"png\", \"jpeg\", \"gif\"}\n) -> typing.Iterable[typing.Tuple[str, str]]:\n for root, _, files in os.walk(current_dir):\n for file_name in files:\n ext = file_name.rsplit('.', 1)[-1].lower()\n if ext in exts:\n yield os.path.join(root, file_name), file_name", "def _fetch_all_images(self, path) -> List[str]:\n files_all = []\n\n for ext in self.exts:\n files_all.extend(glob.glob(join(path, ext)))\n\n return files_all", "def get_custom_imgs(custom_image_path):\n img_paths = []\n for path, dir, filenames in os.walk(custom_image_path):\n for filename in filenames:\n img_paths.append(os.path.join(path, filename))\n break\n return img_paths", "def get_paths(self):\n return (self.world_fpath, self.subj_fpath, self.peds_fpath)", "def load_paths(dir):\n IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG',\n '.png', '.PNG', '.bmp', '.BMP']\n image_paths = []\n\n # traverse directory to obtain only paths to images\n for dir_name, _, paths in sorted(os.walk(os.path.expanduser(dir))):\n for path in paths:\n if any(path.endswith(extensions) for extensions in IMG_EXTENSIONS):\n image_paths.append(os.path.expanduser(dir_name + '/' + path))\n\n return image_paths", "def getMediaFiles(path):\n fileList = getMediaFileList(path)\n # dirList = getDirectoryList(path)\n\n # results = map(getMediaFiles, dirList)\n\n # for result in results:\n # fileList = fileList + result\n\n return fileList", "def get_images_path(file_path):\n assert os.path.isfile(file_path)\n\n images_path = []\n image_dir = os.path.dirname(file_path)\n\n with open(file_path, 'r') as f:\n for line in f:\n line = line.strip()\n origin_path, annot_path = line.split(\" \")\n origin_path = os.path.join(image_dir, origin_path)\n annot_path = os.path.join(image_dir, annot_path)\n images_path.append([origin_path, annot_path])\n\n return images_path", "def fullpathlist(path):\n try:\n return [os.path.join(path, filename) for filename in os.listdir(path)]\n except OSError:\n return []", "def listImageFolder():\n #Note: Ignores files ending in ~ which is a backup/lock file\n return [f for f in os.listdir(imageFolder) if f[-1] is not '~']", "def filePaths(directory_with_files):\n\n # get a list of file names in directory\n list_of_files = os.listdir(directory_with_files) \n\n # join directory path and file name to get full paths to files\n filepaths = [os.path.join(directory_with_files, filename) for filename in list_of_files]\n\n return filepaths", "def findFiles(target, path):\r\n\tfiles = []\r\n\tlyst = os.listdir(path)\r\n\tfor element in lyst:\r\n\t\tif os.path.isfile(element):\r\n\t\t\tif target in element:\r\n\t\t\t\tfiles.append(path + os.sep + element)\r\n\t\telse:\r\n\t\t\tos.chdir(element)\r\n\t\t\tfiles.extend(findFiles(target, os.getcwd()))\r\n\t\t\tos.chdir(\"..\")\r\n\treturn files", "def fetch_path_from_dirs(list_of_search_dirs, key):\n outputs 
= []\n for d in list_of_search_dirs:\n this_search_path = os.path.join(d, \"*\" + key + \"*\")\n outputs.extend(glob.glob(this_search_path))\n return outputs", "def read_paths(path):\n images = [[] for _ in range(2)]\n for dirname, dirnames, _ in os.walk(path):\n for subdirname in dirnames:\n filepath = os.path.join(dirname, subdirname)\n for filename in os.listdir(filepath):\n try:\n imgpath = str(os.path.join(filepath, filename))\n images[0].append(imgpath)\n limit = re.findall('[0-9]+', filename)\n images[1].append(limit[0])\n except IOError as err:\n print(\"I/O error\")\n except:\n print(\"I/O error 2\")\n raise\n return images", "def source_dirs_files(fspath, fil=None):\r\n dirs = []\r\n files = []\r\n for child in fspath.listdir(fil=fil):\r\n if child.basename.startswith('.'):\r\n continue\r\n if child.check(dir=True):\r\n dirs.append(child)\r\n elif child.check(file=True):\r\n if child.ext in ['.pyc', '.pyo']:\r\n continue\r\n files.append(child)\r\n return sorted(dirs), sorted(files)", "def list_filenames(self):\n l = []\n for path, dirs, files in os.walk(self.archive_path):\n for file in files:\n l.append(os.path.relpath(os.path.join(path,file),self.archive_path))\n l.sort()\n return l", "def _path_files(self):\n\n if not os.path.exists(self.path):\n return None\n\n directory_content = os.listdir(self.path)\n files = []\n\n while len(directory_content) != 0:\n\n if not directory_content[0].startswith(self.path):\n directory_obj = os.path.join(self.path, directory_content[0])\n else:\n directory_obj = directory_content[0]\n\n if os.path.isfile(directory_obj):\n files.append(directory_obj)\n elif os.path.exists(directory_obj):\n temp_directory_content = os.listdir(directory_obj)\n for obj in temp_directory_content:\n directory_content.append(os.path.join(directory_obj, obj))\n directory_content.pop(0)\n\n return files", "def get_paths(input_folder: str) -> list[str]:\n\n return [f for f in os.listdir(input_folder) if f[-4:] == '.txt' and f[:3] != 'top']", "def list_selfplay_dirs(base_dir):\n\n model_dirs = [os.path.join(base_dir, x)\n for x in tf.io.gfile.listdir(base_dir)]\n return sorted(model_dirs, reverse=True)", "def _get_images(image_path):\n logger.debug(\"Getting images: '%s'\", image_path)\n if not os.path.isdir(image_path):\n logger.debug(\"Folder does not exist\")\n return None\n files = [os.path.join(image_path, f)\n for f in os.listdir(image_path) if f.lower().endswith((\".png\", \".jpg\"))]\n logger.debug(\"Image files: %s\", files)\n return files", "def getImages(self,Project=\"\"):\n #images = [\"image1.jpg\",\"image2.jpg\",\"image3.jpg\"]\n \n os.chdir(self.dataDir)\n images = glob.glob(\"*.png\")\n \n return images", "def get_directories():\n # get current working dir\n directory = os.getcwd()\n # list of dir to look in repo for files\n directories = [\n directory,\n os.path.expanduser(os.path.join(directory, 'src')),\n os.path.expanduser(os.path.join(directory, 'tests'))\n ]\n return directories" ]
[ "0.6780654", "0.664078", "0.66328037", "0.6586579", "0.6527416", "0.6516118", "0.6454078", "0.6445807", "0.6427995", "0.64033985", "0.6284854", "0.6242137", "0.62136245", "0.62066734", "0.61857104", "0.61600107", "0.61456895", "0.6120278", "0.61164725", "0.6079566", "0.607903", "0.60765576", "0.6073444", "0.607042", "0.606977", "0.6066481", "0.6065801", "0.6054336", "0.60413295", "0.6034053", "0.602889", "0.60250366", "0.60149413", "0.59982824", "0.59940577", "0.5989405", "0.59889436", "0.5983747", "0.5979806", "0.59707624", "0.5966403", "0.596542", "0.59614986", "0.59587055", "0.5949511", "0.59439516", "0.5937938", "0.5921194", "0.58973396", "0.5896181", "0.589426", "0.5888969", "0.587682", "0.58664894", "0.5864955", "0.58626944", "0.58616513", "0.58607423", "0.58607423", "0.58590317", "0.58499676", "0.5844197", "0.5838828", "0.58318096", "0.58122194", "0.5809824", "0.5807692", "0.579219", "0.5786775", "0.5778203", "0.5773751", "0.57713616", "0.57712257", "0.57620466", "0.57568944", "0.575689", "0.57562786", "0.57545584", "0.5733904", "0.5720712", "0.5720115", "0.5717183", "0.5706487", "0.5706117", "0.5703502", "0.5702218", "0.57015634", "0.56944364", "0.5692686", "0.5687115", "0.5684107", "0.56818515", "0.567799", "0.5671184", "0.56708336", "0.56700397", "0.5668775", "0.56685567", "0.5664542", "0.5658183", "0.5656091" ]
0.0
-1
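Most negatives in the record that ends here follow one shared pattern: walk a directory tree and keep the paths whose extension marks them as an image. A minimal self-contained sketch of that shared pattern follows; the function name and extension set are illustrative choices, not taken from any single negative in the row.

import os

IMAGE_EXTENSIONS = {'.jpg', '.jpeg', '.png'}

def walk_image_paths(root_dir):
    # Collect absolute paths of every image file found anywhere below root_dir.
    image_paths = []
    for dirpath, _dirnames, filenames in os.walk(root_dir):
        for name in filenames:
            if os.path.splitext(name)[1].lower() in IMAGE_EXTENSIONS:
                image_paths.append(os.path.abspath(os.path.join(dirpath, name)))
    return image_paths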
Return the list of full paths for pictures and movies in source directory plus subdirectories containing media
def list_of_medias_ext(args, sourcedir): result = list() listdir = sorted_listdir(os.listdir(sourcedir)) if '.nomedia' not in listdir: for basename in listdir: fullname = os.path.join(sourcedir, basename) if os.path.isdir(fullname) and basename != '$RECYCLE.BIN' and contains_media(args, fullname): result.append(fullname) else: if is_media_within_dates(fullname, args.dates): result.append(fullname) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_of_files(sourcedir, recursive):\n result = list()\n if recursive is False:\n listdir = sorted_listdir(os.listdir(sourcedir))\n if '.nomedia' not in listdir:\n for basename in listdir:\n result.append(os.path.join(sourcedir, basename))\n else:\n for root, dirs, files in os.walk(sourcedir):\n if '.nomedia' not in files:\n for basename in sorted_listdir(files):\n result.append(os.path.join(root, basename))\n return result", "def __return_movie_file_list(self, movie_path):\n movie_dir = movie_path.rsplit(\"/\",1)[0]\n movie_file_list =[]\n movie_extentionds = self.__movie_file_extensions(self.__file_extentions)\n for x in os.listdir(movie_dir):\n if x.rsplit(\".\",1)[-1]in movie_extentionds:\n movie_file_list.append(movie_dir+\"/\"+x)\t\t\n\t#USUNAC URL Z NAPISY24\n return movie_file_list", "def get_file_paths():\n audio_folder = MY_DIR + '/assets'\n\n audio_addy_list = []\n for file in os.scandir(audio_folder):\n audio_addy_list.append(file.path)\n\n return audio_addy_list", "def getMediaFiles(path):\n fileList = getMediaFileList(path)\n # dirList = getDirectoryList(path)\n\n # results = map(getMediaFiles, dirList)\n\n # for result in results:\n # fileList = fileList + result\n\n return fileList", "def get_subdirs(src_dir):\n img_dirs = sorted(next(os.walk(src_dir))[1])\n subdirs = [src_dir + img_dir for img_dir in img_dirs]\n return subdirs", "def find_photos(source_path, common_extensions=('JPG', 'CR2', 'ORF', 'ARW', 'TIFF', 'DNG'), ignore=[]):\n # combinedignored = re.compile('|'.join('(?:{0})'.format(x) for x in ignore))\n # use endswith , ignore must be a tuple then\n # if ignore and dirpath.endswith(ignore):\n # for duplication, at the end cll the same funciton\n\n source_files = list()\n\n for (dirpath, dirnames, filenames) in os.walk(source_path):\n for f in filenames:\n if f.upper().endswith(common_extensions):\n # source_files.append(os.path.join(dirpath, f))\n parent = os.path.basename(os.path.normpath(dirpath))\n source_files.append({'dir':dirpath,\n 'filename':f,\n 'parent_folder':parent})\n\n return source_files", "def get_movies(path):\n movies_list = []\n \n \n for f in os.listdir(path):\n \n full_file_path = join(path,f)\n if isdir(full_file_path):\n \n movies_list.extend( get_movies(full_file_path) )\n \n elif isfile(full_file_path) and full_file_path[-3:] in util.extension:\n m = Movie(f, full_file_path)\n movies_list.append(m)\n \n return movies_list", "def get_all_paths(why = 'train'):\r\n if why == 'train':\r\n parent_folder = train_parent_folder\r\n if why == 'test':\r\n parent_folder = test_test_folder\r\n sub_folders = glob.glob(parent_folder) # Directories of all languages\r\n image_paths = [glob.glob(sub_folder + '\\*') for sub_folder in sub_folders] # Directories of all characters\r\n image_paths = sum(image_paths, []) # Flatten out the 2D list to a 1D list \r\n return image_paths", "def get_replay_source_helper_paths(self):\n\n if self.replay_source is None:\n return None\n\n paths = []\n classes = self.get_helpers_classes()\n\n base_path = self.base_folder_path + \"/data/replay_images/\" + self.replay_source + \"/\"\n for hc in classes:\n current_paths = []\n for c in hc:\n path = base_path + str(c).zfill(2) + \".tfrecord\"\n current_paths.append(path)\n paths.append(current_paths)\n return paths", "def get_image_path(raw_input_dir: str) -> list:\n result = []\n for root, dirs, files in os.walk(raw_input_dir):\n for file in files:\n result.append(os.path.join(root, file))\n return result", "def voicesPathsCollector(source_dir, gender_ids):\r\n # The 
list containing the IDs of the speakers which are the folder names\r\n person_ids_list = gender_ids\r\n\r\n # An empty list which it will contains voices paths which are in the section folders\r\n voices_paths_list = []\r\n\r\n # Iterating over a person speaker ids\r\n for person_id_idx in range(len(person_ids_list)):\r\n # Speaker path containing the chapter's directories\r\n speaker_path = Path.cwd().joinpath(source_dir, person_ids_list[person_id_idx])\r\n # The list containing the IDs of the sections by the current speaker which are the folder names\r\n sections_ids_list = os.listdir(speaker_path)\r\n\r\n # Iterating over a sections ids by the current speaker\r\n for sec_id_idx in range(len(sections_ids_list)):\r\n # Section path containing files\r\n section_path = Path.cwd().joinpath(speaker_path,sections_ids_list[sec_id_idx])\r\n # Iterating over files which are existing in section's folders\r\n for file in os.listdir(section_path):\r\n # Choose files with .flac extension\r\n if file.endswith(\".flac\"):\r\n\r\n voice_path = Path.cwd().joinpath(section_path, file)\r\n voices_paths_list.append(voice_path)\r\n\r\n return voices_paths_list", "def get_replay_source_no_helper_paths(self):\n paths = []\n classes = self.get_replay_classes_no_helper()\n base_path = self.base_folder_path + \"/data/replay_images/\" + self.replay_source + \"/\"\n\n \n for c in classes:\n full_path = base_path + str(c).zfill(2) + \".tfrecord\"\n paths.append(full_path)\n \n return paths", "def readPlayerImageFiles(self):\n currentPath = os.path.dirname(os.path.abspath(__file__))\n listOfFileNames=[]\n for i in os.listdir(currentPath):\n if re.match(\"player\\_\\d+\",i): #i.endswith(\".gif\")\n listOfFileNames.append(currentPath+'/'+i)\n return listOfFileNames", "def get_library_content(self):\n from glob import glob\n try:\n os.path.isdir(self.source)\n lst = glob(self.source + '/*')\n except TypeError:\n lst = self.source\n dircheck = True\n while dircheck is True:\n dircheck = False\n newlst = []\n for entry in lst:\n if os.path.isdir(entry):\n newlst.extend(glob(entry + '/*'))\n dircheck = True\n else:\n newlst.append(entry)\n lst = newlst\n return lst", "def get_all_image_paths(self):\n image_paths, image_labels = [], []\n for directory_name, subdirectory_list, file_list in os.walk(self.root_directory):\n for file_name in file_list:\n if file_name.endswith(('.jpg',)):\n image_paths.append(os.path.join(directory_name, file_name))\n # Translates labels to 0-26 as recommended in the exercise description\n image_labels.append(ord(directory_name[-1]) - 97)\n return image_paths, image_labels", "def source_paths(self):\n paths = self.config.get('static_dirs')\n if paths:\n return paths\n return [self.config.get('static_dir')]", "def get_paths_list_from_folder(folder):\n names = os.listdir(folder)\n relative_paths = [os.path.join(folder, image_name) for image_name in names]\n return relative_paths", "def create_directory_list(root_dir: str):\n if not os.path.exists(root_dir):\n raise FileNotFoundError(\"Directory {} does not exist\".format(root_dir))\n\n # List all directories associated to different videos.\n recording_path_list = [os.path.join(root_dir, f) for f in os.listdir(root_dir)]\n\n input_data_path = []\n for g in recording_path_list:\n # Append the different directories associated to different video frame intervals.\n input_data_path.extend([os.path.join(g, f) for f in os.listdir(g)])\n\n return input_data_path", "def get_dir_files(self, recursive=False):\n logging.info('Enumerating files under the source 
path (recursive=%s) ...', recursive)\n files = {}\n if not recursive:\n files[self.path_source] = [\n f for f in os.listdir(self.path_source) if os.path.isfile(os.path.join(self.path_source, f))\n ]\n else:\n for current_dir, sub_dirs, dir_files in os.walk(self.path_source):\n files[os.path.join(self.path_source, current_dir)] = [f for f in dir_files]\n\n return files", "def GetSongFilenames():\n\n\t## Loop through each directory\n\tsong_files = []\n\tfor root, dirs, fnames in os.walk(\"_data\\\\fma_small\\\\\"):\n\t\t\n\t\t## Skip the first level\n\t\tif root == \"_data\\\\fma_small\\\\\":\n\t\t\tcontinue\n\n\t\t## Otherwise collect the files, appending\n\t\t## the root path.\n\t\tsong_files += [root+\"\\\\\"+f for f in fnames]\n\n\treturn song_files", "def get_images_paths(path: str) -> List[str]:\n\n image_paths = []\n\n for folder in os.listdir(path):\n for file in os.listdir(os.path.join(f\"{path}/{folder}\", \"images\")):\n image_paths.append(f\"{path}/{folder}/images/{file}\")\n\n return image_paths", "def get_dir_and_file_list(path):\r\n dList = os.listdir(path)\r\n dirList = []\r\n fileList = []\r\n\r\n for item in dList:\r\n \r\n if os.path.isdir(os.path.join(path, item)):\r\n dirList.append(item)\r\n elif os.path.isfile(os.path.join(path, item)):\r\n if any(image_type in item.lower() for image_type in image_types):\r\n preview = image_preview(os.path.join(path, item))\r\n fileList.append((item, preview))\r\n else:\r\n fileList.append((item, None))\r\n\r\n return dirList, fileList", "def load_video_paths(dir):\n VIDEO_EXTENSIONS = ['.mov', '.MOV', '.mp4']\n video_paths = []\n\n # traverse directory to obtain only paths to videos\n for dir_name, _, paths in sorted(os.walk(os.path.expanduser(dir))):\n for path in paths:\n if any(path.endswith(extensions) for extensions in VIDEO_EXTENSIONS):\n video_paths.append(os.path.expanduser(dir_name + '/' + path))\n\n return video_paths", "def get_datapaths(input_dir):\n image_paths = []\n assert os.path.isdir(input_dir), f\"{input_dir} is not existed\"\n\n for root, _, names in os.walk(input_dir):\n for name in names:\n path = os.path.join(root, name)\n image_paths.append(path)\n return image_paths", "def get_source_files(self):\n return [\n path.as_posix()\n for path in _Path(self.src_dir).rglob(\"*\")\n if not path.is_dir()\n ] + [\n (path / \"CMakeLists.txt\").as_posix()\n for path in _PurePath(self.src_dir).parents\n ]", "def listdir(self, name, source, test_data=()):\n assert isinstance(source, config_types.Path)\n self.m.path.assert_absolute(source)\n result = self._run(\n name, ['listdir', source],\n lambda: self.test_api.listdir(test_data),\n self.m.raw_io.output_text())\n ret = [source.join(x) for x in result.stdout.splitlines()]\n result.presentation.logs['listdir'] = map(str, ret)\n return ret", "def get_skins_and_extensions(base_dir):\n ext_paths = []\n for subdir in ['extensions', 'skins']:\n for name in os.listdir(os.path.join(base_dir, subdir)):\n if os.path.isdir(os.path.join(base_dir, subdir, name)):\n ext_paths.append(os.path.join(subdir, name))\n return ext_paths", "def files_in( d ):\n return [ join(d,f) for f in os.listdir(d) if isfile(join(d,f)) ]", "def get_content_directories() -> List[str]:\n result:list[str] = []\n for current_path in os.listdir(\"content\"):\n if os.path.isdir(os.path.join(\"content\", current_path)):\n result.append(os.path.join(\"content\", current_path))\n return result", "def get_filepaths(extract_dir):\n\n index = []\n labels = []\n _extract_dir = os.path.join(extract_dir, 'UCF-101')\n for 
folder in os.listdir(_extract_dir):\n labels.append(folder)\n folderpath = os.path.join(_extract_dir, folder)\n\n if not os.path.isdir(folderpath):\n continue\n\n for filename in os.listdir(folderpath):\n if 'avi' not in filename:\n continue\n\n if filename[0] == '.':\n continue\n\n filepath = os.path.join(folderpath, filename)\n\n if os.path.exists(filepath):\n index.append(filepath)\n else:\n print(filepath)\n return index, labels", "def fetch_path_from_dirs(list_of_search_dirs, key):\n outputs = []\n for d in list_of_search_dirs:\n this_search_path = os.path.join(d, \"*\" + key + \"*\")\n outputs.extend(glob.glob(this_search_path))\n return outputs", "def udimPaths(self):\n\t\ttry:\n\t\t\treturn [ textureFile( self.dirPath + a ) for a in os.listdir( self.dirPath ) if self.name + '.' in a ]\n\t\texcept:\n\t\t\treturn []", "def getImages(path):\n files = list()\n\n for f in listdir(path):\n file = join(path, f)\n if isfile(file):\n files.append(getImage(file))\n\n return files", "def get_directories():\n # get current working dir\n directory = os.getcwd()\n # list of dir to look in repo for files\n directories = [\n directory,\n os.path.expanduser(os.path.join(directory, 'src')),\n os.path.expanduser(os.path.join(directory, 'tests'))\n ]\n return directories", "def findFiles(target, path):\r\n\tfiles = []\r\n\tlyst = os.listdir(path)\r\n\tfor element in lyst:\r\n\t\tif os.path.isfile(element):\r\n\t\t\tif target in element:\r\n\t\t\t\tfiles.append(path + os.sep + element)\r\n\t\telse:\r\n\t\t\tos.chdir(element)\r\n\t\t\tfiles.extend(findFiles(target, os.getcwd()))\r\n\t\t\tos.chdir(\"..\")\r\n\treturn files", "def walk_dir(path):\r\n\tassets = []\r\n\r\n\tfor file in os.listdir(path):\r\n\t\tif os.path.isdir(path + \"/\" + file):\r\n\t\t\tif not file.startswith(\".\"):\r\n\t\t\t\t# Ignore . 
dirs (e.g .svn)\r\n\t\t\t\tassets.extend(walk_dir(path + \"/\" + file))\r\n\t\telif file.endswith('.blend'):\r\n\t\t\tassets.append(path + \"/\" + file)\r\n\r\n\treturn assets", "def all_image_paths(self):\n self.labels = [i for i in (self.get_immediate_subdirectories(self.root_dir))\n if not i.startswith('.')]\n\n for root, subFolders, files in os.walk(self.root_dir):\n files = [i for i in files if not i.startswith('.')]\n files = files[:self.img_num] # hard coded - will not read in\n for i in files:\n self.all_files.append(os.path.abspath(root) + '/'.join(subFolders) + '/' + i)", "def list_selfplay_dirs(base_dir):\n\n model_dirs = [os.path.join(base_dir, x)\n for x in tf.io.gfile.listdir(base_dir)]\n return sorted(model_dirs, reverse=True)", "def get_all_videos_in_directory(directory: str):\n\n all_files_and_folders = os.listdir(directory)\n\n only_videos = []\n for file in all_files_and_folders:\n if is_video(file):\n only_videos.append(file)\n ...\n\n return only_videos", "def collect_project_source_files():\n source_files = glob.glob(PROJECT_SOURCE_FILES_FOLDER + '/**/*.py', recursive=True)\n # Insert root main.py at the beginning.\n source_files.insert(0, os.path.join(PROJECT_ROOT_FOLDER, 'main.py'))\n return list(map(lambda path: posixpath.join(*path.split('\\\\')), source_files))", "def getFiles(searchDir = './', extension = 'source'):\n from glob import glob \n\n return glob(searchDir+'/*.'+extension)", "def _get_files(self, path):\n result = []\n for f in os.listdir(path):\n if os.path.isdir(os.path.join(path, f)):\n result += self._get_files(os.path.join(path, f))\n else:\n result.append(os.path.join(path, f))\n return result", "def _path_files(self):\n\n if not os.path.exists(self.path):\n return None\n\n directory_content = os.listdir(self.path)\n files = []\n\n while len(directory_content) != 0:\n\n if not directory_content[0].startswith(self.path):\n directory_obj = os.path.join(self.path, directory_content[0])\n else:\n directory_obj = directory_content[0]\n\n if os.path.isfile(directory_obj):\n files.append(directory_obj)\n elif os.path.exists(directory_obj):\n temp_directory_content = os.listdir(directory_obj)\n for obj in temp_directory_content:\n directory_content.append(os.path.join(directory_obj, obj))\n directory_content.pop(0)\n\n return files", "def getfiles(path): \n global picture_list\n try:\n # dir_list has all files and directories in path\n # any directory is WITHOUT ending '/'\n dir_list = os.listdir(path)\n except:\n # path may not be a directory or permission error\n print \"ERROR: in getfiles, picture_list:\", picture_list\n picture_list = None\n return\n \n for line in dir_list:\n file = path + \"/\" + line\n if os.path.isdir(file):\n getfiles( file) # dig into subdirectory\n elif isPicture(file):\n picture_list.append(file)\n else: \n # neither picture file nor directory; ignore \n pass\n return", "def relative_to_buildroot(self):\n return [os.path.join(self.rel_path, source) for source in self.source_paths]", "def get_filepaths(self):\n image_filepaths = set()\n for one_dir in self.dirs:\n for root, dirnames, filenames in os.walk(one_dir):\n for filename in filenames:\n if re.search(r\"\\.(jpg|jpeg|png|bmp|tiff)$\", filename):\n image_filepaths.add(os.path.join(root, filename))\n image_filepaths = sorted(list(image_filepaths))\n return image_filepaths", "def getImmediateSubdirectories(dir):", "def list_files_and_dirs(self, path=\"/\"):\n dirs = self.list_dirs(path)\n files = self.list_files(path)\n return dirs + files", "def generate_paths(dirname, 
recursive=False):\n if recursive:\n gen = itertools.chain.from_iterable(\n map(lambda x: map(partial(os.path.join, x[0]), x[2]),\n os.walk(dirname)))\n else:\n gen = filter(os.path.isfile,\n map(partial(os.path.join, dirname), os.listdir(dirname)))\n return filter(lambda s: re.match('.+\\.midi?$', s, re.I), gen)", "def return_images(directory):\r\n allfiles = os.listdir(directory)\r\n image_list = [im for im in allfiles if '.jpg' in str(im)]\r\n image_list = [directory + im for im in image_list]\r\n return image_list", "def list_of_medias(args, sourcedir, recursive):\n files = list_of_files(sourcedir, recursive)\n return [_ for _ in files if is_media_within_dates(_, args.dates)]", "def list_directory(path):\n files = []\n for f in listdir(path):\n if isfile(join(path, f)) and f.endswith('.mp3'):\n files.append(f)\n return files", "def get_subdirectories(self, physical_path):\n result = []\n for p in os.listdir(physical_path):\n if not os.path.isdir(os.path.join(physical_path, p)):\n continue\n result.append(os.path.join(physical_path, p))\n\n return result", "def list_dir(self, path):", "def flatfiles(cam):\n return fullpathlist(flatpath(cam))", "def get_media_directory():\n\treturn _paths[_MEDIA_DIRECTORY_KEY]", "def getAllDirs(self):\n\n dirs = [ self ]\n for d in self._subdirs:\n if d.hasImages():\n dirs += d.getAllDirs()\n return dirs", "def get_image_list(path: str) -> list:\n\n return list(os.path.join(path, f)\n for f in os.listdir(path)\n if f.endswith('.jpg'))", "def getMediaFileList(path):\n\n fileTypes = (\"jpg\", \"mov\", \"mp4\")\n fileList = []\n for base_dir, dirs, files in os.walk(path):\n fileList.extend([os.path.join(base_dir, f) for f in files if f.split(\".\")[1].lower() in fileTypes])\n\n # for the new canon camera, ther are some .Trash and trashinfo files, want to ignore them\n fileList = [file for file in fileList if \"trash\" not in file and \"Trash\" not in file]\n return fileList", "def get_filepaths(directory):\n file_paths = [] # List which will store all of the full filepaths.\n\n # Walk the tree.\n\n for root, directories, files in os.walk(directory):\n\n for filename in files:\n if filename.endswith('.wav'):\n # Join the two strings in order to form the full filepath.\n filepath = os.path.join(root, filename)\n file_paths.append(filepath) # Add it to the list.\n # pdb.set_trace()\n file_paths.sort()\n return file_paths", "def _get_file_paths(self):\n return [os.path.join(self.path, self.mode, 'waveforms', file_name + '.npy') for file_name in self.file_names]", "def source_dirs_files(fspath, fil=None):\r\n dirs = []\r\n files = []\r\n for child in fspath.listdir(fil=fil):\r\n if child.basename.startswith('.'):\r\n continue\r\n if child.check(dir=True):\r\n dirs.append(child)\r\n elif child.check(file=True):\r\n if child.ext in ['.pyc', '.pyo']:\r\n continue\r\n files.append(child)\r\n return sorted(dirs), sorted(files)", "def get_paths(input_folder):\n list_files = []\n conll_folder = glob.glob(input_folder + '/*.json')\n \n for filename in conll_folder:\n list_files.append(filename)\n\n return list_files", "def get_lists_in_dir(dir_path):\n image_list = []\n\n for filename in glob.glob(dir_path + '/*.jpg'):\n image_list.append(filename)\n return image_list", "def get_file_list(input_dir):\n\tfile_paths = [input_dir +'/' + f for f in listdir(input_dir) if isfile(join(input_dir, f)) ]\n\treturn file_paths", "def search_images(\n current_dir: str,\n exts={\"jpg\", \"png\", \"jpeg\", \"gif\"}\n) -> typing.Iterable[typing.Tuple[str, str]]:\n for root, _, files in 
os.walk(current_dir):\n for file_name in files:\n ext = file_name.rsplit('.', 1)[-1].lower()\n if ext in exts:\n yield os.path.join(root, file_name), file_name", "def get_dirs(source_dir):\n all_dirs = set()\n it = os.walk(source_dir)\n it.next()\n dirs = list(it)\n for d in dirs:\n if len(d[1])==0:\n all_dirs.add(d[0])\n return all_dirs", "def sources_absolute_paths(self):\r\n abs_target_base = os.path.join(get_buildroot(), self.target_base)\r\n for src in self.sources:\r\n yield os.path.join(abs_target_base, src)", "def getFilePaths():\n \n image_dir = r'/hpc/wfok007/mpi_heart/Training Set'\n mask_paths = []\n image_paths = []\n for root, dirs, files in os.walk(image_dir, topdown=False):\n for name in files:\n if name == 'laendo.nrrd':\n mask_paths.append(os.path.join(root, name))\n elif name == 'lgemri.nrrd':\n image_paths.append(os.path.join(root, name))\n else:\n print ('%s is unknown' %name)\n return mask_paths, image_paths", "def segment_paths(root):\n directories = []\n history = history_path(root)\n for d in os.listdir(history):\n path = os.path.join(history, d)\n if os.path.isdir(path):\n directories.append(path)\n return sorted(directories)", "def get_path_names(directory):\n paths_without_source = set()\n paths = glob.glob(source + \"**/*.*\", recursive=True)\n for p in paths:\n paths_without_source.add(p.replace(directory, \"\", 1))\n\n return paths_without_source", "def list_sources(config, base_dir, verbose=False):\n for source in config.sources_under(abspath(base_dir)):\n if verbose:\n print(\"# %s (%s)\" % (source.nicedir, ' '.join(source.info)))\n else:\n print(source.nicedir)", "def demo_paths(self):\n base_path = os.path.join(self.module.__path__[0], 'demo')\n paths = []\n if os.path.isdir(base_path):\n for item in os.listdir(base_path):\n # TODO: support examples which is not auto-loaded\n if not os.path.isdir(os.path.join(base_path, 'examples')):\n paths.append(os.path.join(base_path, item))\n return paths", "def parse_dir_imgs(root_pth):\n def visit(imgpths, pth, names):\n # Appends detected image filenames to a list.\n imgpths.extend([os.path.join(pth, name) for name in names\n if os.path.splitext(name)[1].lower() in img_exts])\n # Walk down directory tree and get the image file paths\n imgpaths = []\n for dp, foo, names in os.walk(root_pth):\n visit(imgpaths, dp, names)\n # Make lowercased list of imagefilenames\n imgnames = [os.path.split(pth)[1].lower() for pth in imgpaths]\n return imgnames, imgpaths", "def recursive_glob(rootdir=\".\", suffix=\"\"):\n image_paths = []\n for looproot, _, filenames in os.walk(rootdir):\n for filename in filenames:\n if filename.endswith(suffix):\n image_paths.append(os.path.join(looproot, filename))\n return image_paths", "def recursive_glob(rootdir=\".\", suffix=\"\"):\n image_paths = []\n for looproot, _, filenames in os.walk(rootdir):\n for filename in filenames:\n if filename.endswith(suffix):\n image_paths.append(os.path.join(looproot, filename))\n return image_paths", "def get_feature_paths(start_dir, extensions = ['dcm']):\n if start_dir is None:\n start_dir = os.getcwd()\n img_paths = []\n for roots,dirs,files in os.walk(start_dir):\n for name in files:\n for e in extensions:\n if name.endswith('.' 
+ e):\n img_paths.append(roots + '/' + name)\n img_paths.sort()\n return img_paths", "def scandir(path_):\n return os.listdir", "def extract_embeddings_recursive_from_dir(self, dir_from: PathLike, dir_to: PathLike) -> PathLike:\n pass", "def get_img_files(images, db):\n img_dir = db.source\n if img_dir == None:\n raise ValueError('Cannot locate file without a base path. This method looks for it at \\\n db.source, which is not set. This should be set by the loader during DB construction!')\n img_dir = path.join(img_dir, 'img') \n locs = db.get_img_locs(images)\n titles = db.get_location_titles()\n returnval = []\n for image in images:\n loc = locs[image]\n if loc is None:\n raise ValueError('The image %s could not be found' % image)\n returnval.append(path.join(img_dir, titles[loc], str(image) + '.jpg'))\n return returnval", "def collect_files(path, audio_files):\n\n for entry in os.scandir(path):\n if entry.is_dir():\n collect_files(entry.path, audio_files)\n if entry.is_file() and (entry.path.endswith(\".flac\") or entry.path.endswith(\".wav\")):\n audio_files.append(entry.path)", "def get_imlist(path):\n return [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.jpg')]", "def getSourcePaths(self, makeGlyphs=True, makeKerning=True, makeInfo=True):\n paths = []\n for name in self.sources.keys():\n paths.append(self.sources[name][0].path)\n return paths", "def filePaths(directory_with_files):\n\n # get a list of file names in directory\n list_of_files = os.listdir(directory_with_files) \n\n # join directory path and file name to get full paths to files\n filepaths = [os.path.join(directory_with_files, filename) for filename in list_of_files]\n\n return filepaths", "def getContentFiles():\n contentFiles = []\n for contentDir, subDirs, filenames in os.walk(sourceDir, followlinks=True):\n if shouldIgnore(contentDir):\n subDirs[:] = []\n continue\n for filename in filenames:\n if not shouldIgnore(filename):\n cf = ContentFile(os.path.join(contentDir, filename))\n log(`cf.path`)\n contentFiles.append(cf)\n return contentFiles", "def get_imlist(path):\n\treturn [os.path.join( path, f) for f in os.listdir(path) if f.endswith('.jpg')]", "def get_filepaths(directory):\n file_paths = [] # List which will store all of the full filepaths.\n\n # Walk the tree.\n for root, directories, files in os.walk(directory):\n root = os.path.normpath(root)\n directories.sort()\n files.sort()\n for filename in files:\n # Join the two strings in order to form the full filepath.\n if not root.startswith('_') and not root.startswith('.') and filename.endswith(\".md\"):\n filepath = os.path.normpath(os.path.join(root, filename))\n file_paths.append(filepath) # Add it to the list.\n\n return file_paths # Self-explanatory.", "def files(self):\n self._printer('\\tFiles Walk')\n for directory in self.directory:\n for path in os.listdir(directory):\n full_path = os.path.join(directory, path)\n if os.path.isfile(full_path):\n if not path.startswith('.'):\n self.filepaths.append(full_path)\n return self._get_filepaths()", "def filepaths(self, langs) -> Generator[str, None, None]:\n for pv in self.projects(langs):\n yield from pv.filepaths()", "def getVideosPath(self):\r\n videoTypes = [\r\n ('MP4 files', '*.mp4'),\r\n ('3GP files', '*.3gp'),\r\n ('WMV files', '*.wmv'),\r\n ('FLV files', '*.flv'),\r\n ('AVI files', '*.avi'),\r\n ]\r\n global filenames\r\n filenames = askopenfilenames(title=\"Select video files\", multiple=True, )", "def extract_from_dir(self, data_dir):\n data_paths = []\n for(dirpath, dirnames, 
filenames) in os.walk(data_dir):\n for file in filenames:\n # Make sure that we don't get garbage files\n file_type = file.split('.')\n if len(file_type) == 1 or (file_type[1] not in ['wav', 'txt']):\n continue\n\n data_paths.append(os.path.join(dirpath, file))\n\n return data_paths", "def volume_paths(path):\n files = (os.path.join(path, f) for f in sorted(os.listdir(path)))\n return [f for f in files if os.path.isdir(f) or f.endswith('.zip')]", "def get_all_sources(remit):\n if remit == 'panzer' or remit == 'pandoc':\n os.chdir('source-'+remit)\n sourcelist = [name for name in os.listdir(\".\") if os.path.isdir(name)]\n os.chdir('..')\n else:\n # get the maximal list of sources for a diff\n pandoc_list = get_all_sources('pandoc')\n panzer_list = get_all_sources('panzer')\n sourcelist = list(set(pandoc_list+panzer_list))\n sourcelist.sort()\n return sourcelist", "def list_files_in_directory(self):\n lesson_file_dict = dict()\n lesson_file_dict[\"files\"] = []\n\n directory_list = listdir(self.sub_dir)\n for directory in directory_list:\n if isfile(join(self.sub_dir, directory)):\n lesson_file_dict[\"files\"].append(directory)\n\n return lesson_file_dict", "def files_to_upload(source_directory: str) -> list:\n upload_file_names = []\n\n print(source_directory)\n for dirName, subdirList, fileList in os.walk(source_directory):\n for filename in fileList:\n file_path = os.path.join(dirName, filename)\n s3key = os.path.join(os.path.basename(dirName) + '/' + filename)\n upload_file_names.append((file_path, s3key))\n return upload_file_names", "def darkfiles(cam):\n return fullpathlist(darkpath(cam))", "def _subdirectories(self):\n for o in os.listdir(self.directory):\n if os.path.isdir(os.path.join(self.directory, o)):\n yield os.path.join(self.directory, o)", "def dirs(self, dirs=['.']):\n return [Path(d) for d in dirs]", "def find_media_files(dirs, exclude_dirs=None):\n def condition(file_):\n return file_.isreg() and file_.check_regex(MEDIA_EXTENSIONS_REGEX)\n\n def precondition(file_):\n for dir_ in exclude_dirs:\n if dir_ in file_.path:\n return False\n return True\n\n if exclude_dirs is None:\n exclude_dirs = []\n result = []\n for dir_ in dirs:\n for file_ in pd.find.find(dir_, condition, precondition):\n m = re.match(TV_SHOW_REGEX, file_.path)\n if not m:\n print \"Skipped %s\" % file_.path\n continue\n tv_show, season, episode = m.groups()\n tv_show = tv_show.replace('.', ' ')\n result.append((tv_show, int(season), int(episode), file_.path))\n\n return result", "def load_paths(dir):\n IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG',\n '.png', '.PNG', '.bmp', '.BMP']\n image_paths = []\n\n # traverse directory to obtain only paths to images\n for dir_name, _, paths in sorted(os.walk(os.path.expanduser(dir))):\n for path in paths:\n if any(path.endswith(extensions) for extensions in IMG_EXTENSIONS):\n image_paths.append(os.path.expanduser(dir_name + '/' + path))\n\n return image_paths" ]
[ "0.6816371", "0.6584394", "0.65694135", "0.6454687", "0.64294946", "0.63914907", "0.63858587", "0.6369066", "0.6280144", "0.62456554", "0.6161519", "0.6160253", "0.61361057", "0.61112374", "0.60726917", "0.60657465", "0.6015738", "0.60046023", "0.59833866", "0.5970308", "0.5962931", "0.5946842", "0.5943449", "0.5941118", "0.59369385", "0.5896389", "0.58943665", "0.58893466", "0.5880047", "0.5864546", "0.58619297", "0.5841441", "0.58404475", "0.5835982", "0.582826", "0.5820611", "0.58126026", "0.5809517", "0.5809223", "0.5802272", "0.5800264", "0.5798355", "0.5785188", "0.5779069", "0.577144", "0.5770997", "0.5766476", "0.5761326", "0.57604194", "0.575914", "0.5757382", "0.57541484", "0.5743288", "0.5736625", "0.5735156", "0.5727884", "0.57136625", "0.57130337", "0.5698063", "0.56936485", "0.5692937", "0.56926304", "0.56895196", "0.5687305", "0.5680656", "0.5674805", "0.56727535", "0.5670253", "0.5670004", "0.5668927", "0.5668828", "0.5668534", "0.5667364", "0.5663523", "0.5655338", "0.5655338", "0.5654776", "0.5650429", "0.5641517", "0.56352395", "0.56346416", "0.56321883", "0.562954", "0.56246877", "0.56141394", "0.56131536", "0.56053716", "0.5602999", "0.5601989", "0.55894595", "0.55880797", "0.558112", "0.55761385", "0.55746007", "0.55714226", "0.5562068", "0.55565566", "0.55519444", "0.5551157", "0.55498815" ]
0.6835324
0
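The document in the record above depends on helpers the row does not show (sorted_listdir, contains_media, is_media_within_dates) and on a date filter carried in args.dates. A self-contained sketch of the same listing rule, with those helpers replaced by hypothetical extension-based stand-ins and the date filter dropped, could look like this:

import os

MEDIA_EXTENSIONS = {'.jpg', '.jpeg', '.png', '.gif', '.mp4', '.mov', '.avi'}

def is_media(fullname):
    # Simplified stand-in: treat a file as media purely by its extension.
    return os.path.splitext(fullname)[1].lower() in MEDIA_EXTENSIONS

def contains_media(dirname):
    # A directory counts as containing media if no '.nomedia' marker sits at its
    # top level and at least one file below it has a media extension.
    if '.nomedia' in os.listdir(dirname):
        return False
    for root, _dirs, files in os.walk(dirname):
        if any(is_media(os.path.join(root, f)) for f in files):
            return True
    return False

def list_of_medias_ext_simplified(sourcedir):
    # Same rule as the record's document: return media files in sourcedir plus
    # subdirectories (except $RECYCLE.BIN) that themselves contain media.
    result = []
    entries = sorted(os.listdir(sourcedir))
    if '.nomedia' in entries:
        return result
    for basename in entries:
        fullname = os.path.join(sourcedir, basename)
        if os.path.isdir(fullname):
            if basename != '$RECYCLE.BIN' and contains_media(fullname):
                result.append(fullname)
        elif is_media(fullname):
            result.append(fullname)
    return result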
/Gilles/Dev/journal/tests/subdir/deeper2/deepest/OCT_20000112_000004.jpg > deeper2_deepest_OCT_20000112_000004.jpg
/Gilles/Dev/journal/tests/subdir/deeper2/deepest > deeper2_deepest
def relative_name(media_fullname, sourcedir): x = os.path.relpath(media_fullname, sourcedir) x = x.replace('\\', '_').replace('/', '_').replace('#', '_') return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_train(train_img_path):\n\n f = open(\"train.txt\", \"w+\")\n for subdirs, dirs, files in os.walk(train_img_path):\n for filename in files:\n if filename.endswith(\".jpg\"):\n train_image_path = os.path.join(train_img_path, filename)\n print(train_image_path)\n f.write(train_image_path + \"\\n\")\n f.close()", "def save_step_2(imgs, match_list, output_path=\"./output/step2\"):\n # ... your code here ...\n for i in range(len(imgs)):\n name1,tail1 = str.split(filenames[match_list[i][0]],\".\")\n name2,tail2 = str.split(filenames[match_list[i][2]],\".\")\n cv2.imwrite(output_path+\"/\"+name1+\"_\"+str(match_list[i][1])+\"_\"+name2+\"_\"+str(match_list[i][3])+\"_\"+str(match_list[i][4])+\".jpg\", imgs[i])", "def prep(path,date,image):\n \n # run bash code with 'Popen'\n P = Popen('cp '+path+date+'/final/'+image+' ./', shell=True)\n P.wait()\n P = Popen('mv '+image+' '+image+'.fz', shell=True)\n P.wait()\n P = Popen('funpack *.fz', shell=True)\n P.wait()\n P = Popen('rm -rf *.fz', shell=True)\n P.wait()", "def appendpics(pathofimg, w_sub, h_sub, step):\n num = 0\n dirlist = []\n images = [] # images in each folder\n for root, dirs, fileswer in os.walk(pathofimg):\n if len(dirs)!= 0:\n for dir in dirs:\n dirlist.append(dir)\n for rooert, dirwerwes, files in os.walk(pathofimg+'/'+dir):\n for file in files:\n if(file.endswith('.png')):\n images.append(Image.open(pathofimg+'/'+dir+'/'+file))\n if(len(images)==81):\n break\n target = montage(images, w_sub, h_sub, step)\n target.save(pathofimg +'/'+ dir + '.png', quality=100)\n else:\n dir = 'Generated'\n for file in fileswer:\n if (file.endswith('.png')):\n images.append(Image.open(pathofimg +'/'+ file))\n target1 = montage(images, w_sub, h_sub, step)\n savepath = pathofimg +'/'+ 'generated'\n os.makedirs(savepath)\n target1.save(savepath +'/'+ dir + '.png', quality=100)", "def create_val(val_img_path):\n\n f = open(\"val.txt\", \"w+\")\n for subdirs, dirs, files in os.walk(val_img_path):\n for filename in files:\n if filename.endswith(\".jpg\"):\n val_image_path = os.path.join(val_img_path, filename)\n print(val_image_path)\n f.write(val_image_path + \"\\n\")\n f.close()", "def seperate_dog_cat(src, dst):\n imgs = [f for f in os.listdir(src) if os.path.isfile(os.path.join(src, f)) and not f.startswith('.')]\n \n dst_dog = os.path.join(dst, 'dog')\n dst_cat = os.path.join(dst, 'cat')\n if not os.path.exists(dst_dog):\n os.makedirs(dst_dog)\n if not os.path.exists(dst_cat):\n os.makedirs(dst_cat)\n \n for img in imgs:\n if 'dog' in img:\n move(os.path.join(src, img), dst_dog)\n if 'cat' in img:\n move(os.path.join(src, img), dst_cat)\n print('seperate done')", "def preprocess_images(file_path, new_file_path):\n if not os.path.isdir(new_file_path):\n os.mkdir(new_file_path)\n i = 0\n for dir in listdir(file_path):\n j = 0\n for image_path in listdir(file_path + '/' + dir):\n image = open_image(image_path)\n cv2.imwrite(file_path + '/' + image_path + '/' str(i) + '/' +str(i) + '.jpg', image)\n j += 1\n i += 1", "def prepare_output_dir(out_dir, test_dir):\r\n\r\n if not out_dir.exists():\r\n out_dir.mkdir()\r\n\r\n # get the necessary file names\r\n file_names = get_file_names(test_dir, args.distance, print_file_names=False)\r\n\r\n # copy the images in the firstIms into the output folder\r\n for name in file_names[1][0]:\r\n file_path = Path(test_dir / name)\r\n copy_to = Path(out_dir / name)\r\n shutil.copy(file_path, copy_to)\r\n\r\n # the firstIms list does not contain the last image,\r\n # so we need to also copy the last image of the 
secIms into the output folder\r\n last_im = file_names[1][1][-1]\r\n shutil.copy(Path(test_dir/last_im), Path(out_dir/last_im))\r\n\r\n return file_names", "def save_step_1(imgs, output_path='./output/step1'):\n # ... your code here ...\n i=0\n for each in imgs:\n i+=1\n cv2.imwrite(output_path+\"/output\"+str(i)+\".jpg\", each)", "def imgWrite(img, path):\n dirMake(os.path.dirname(path))\n sitk.WriteImage(img, path)\n\n # Reformat files to be compatible with CIS Software\n #ext = os.path.splitext(path)[1].lower()\n #if ext == \".vtk\": vtkReformat(path, path)", "def save_step_3(img_pairs, match_list, output_path=\"./output/step3\"):\n # ... your code here ...\n for i in range(len(img_pairs)):\n name1,tail1 = str.split(filenames[match_list[i][0]],\".\")\n name2,tail2 = str.split(filenames[match_list[i][1]],\".\")\n cv2.imwrite(output_path+\"/\"+name1+\"_\"+name2+\".jpg\", img_pairs[i][0])\n cv2.imwrite(output_path+\"/\"+name2+\"_\"+name1+\".jpg\", img_pairs[i][1])", "def write(img, path):\n create_directories_for_file_name(path)\n writer = sitk.ImageFileWriter()\n writer.Execute(img, path, True)", "def write_image_to_file_incrementally(image):\r\n i = 0\r\n while os.path.exists(\"sample%s.jpeg\" % i):\r\n i += 1\r\n with open(\"sample%s.jpeg\" % i, \"wb\") as f:\r\n f.write(image)", "def createAllImageFiles(poly, name) :\n \n for i in range(len(poly.getPaths())):\n fileName = name + \"_\" + str(i) + \".dot\"\n imgName = name + \"_\" + str(i) + \".jpg\"\n \n Command = \"neato -Tjpeg \" + fileName + \" -o \" + imgName\n run(Command, shell=True)", "def create_full(dirpath, imgname):\n with open(join(dirpath, imgname), 'r+b') as f:\n with Image.open(f) as image:\n fullpicpath = join(outdir, fullpicdir, imgname)\n print(\"saving full pic to\", fullpicpath)\n image.save(fullpicpath, quality=75, optimize=True)", "def save_step_4(imgs, output_path=\"./output/step4\"):\n # ... 
your code here ...\n cv2.imwrite(output_path+\"/output.jpg\", imgs)", "def copy_database(path_images, path_labels, path_final_images):\n\n try:\n labels = sorted(os.listdir(path_labels))\n except FileNotFoudError:\n print(\"No such file or directory \", path_labels)\n\n try:\n images = sorted(os.listdir(path_images)) #+ \"RetinaNet_I04590/\"))\n except FileNotFoudError:\n print(\"No such file or directory \", path_images)\n\n \"\"\"if not os.path.exists(path_final_images + \"I04590/\"):\n os.mkdir(path_final_images + \"I04590/\")\n\n if not os.path.exists(path_final_images + \"I045135/\"):\n os.mkdir(path_final_images + \"I045135/\")\n\n if not os.path.exists(path_final_images + \"I090135/\"):\n os.mkdir(path_final_images + \"I090135/\")\n\n if not os.path.exists(path_final_images + \"I4590135/\"):\n os.mkdir(path_final_images + \"I4590135/\")\n\n if not os.path.exists(path_final_images + \"Params/\"):\n os.mkdir(path_final_images + \"Params/\")\n\n if not os.path.exists(path_final_images + \"Pauli2/\"):\n os.mkdir(path_final_images + \"Pauli2/\")\n\n if not os.path.exists(path_final_images + \"Pauli3/\"):\n os.mkdir(path_final_images + \"Pauli3/\")\n\n if not os.path.exists(path_final_images + \"Stokes/\"):\n os.mkdir(path_final_images + \"Stokes/\")\n\n if not os.path.exists(path_final_images + \"Rachel/\"):\n os.mkdir(path_final_images + \"Rachel/\")\n\n if not os.path.exists(path_final_images + \"Rachel2/\"):\n os.mkdir(path_final_images + \"Rachel2/\")\"\"\"\n\n for k in range(len(images)):\n if str(k) + \".xml\" in labels:\n copyfile(path_images + \"/\" + images[k],\n path_final_images + \"/\" + images[k])\n \"\"\"copyfile(path_images + \"RetinaNet_I04590/\" + str(k) + \".png\",\n path_final_images + \"I04590/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_I045135/\" + str(k) + \".png\",\n path_final_images + \"I045135/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_I090135/\" + str(k) + \".png\",\n path_final_images + \"I090135/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_I4590135/\" + str(k) + \".png\",\n path_final_images + \"I4590135/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Params/\" + str(k) + \".png\",\n path_final_images + \"Params/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Pauli2/\" + str(k) + \".png\",\n path_final_images + \"Pauli2/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Pauli3/\" + str(k) + \".png\",\n path_final_images + \"Pauli3/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Stokes/\" + str(k) + \".png\",\n path_final_images + \"Stokes/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Rachel/\" + str(k) + \".png\",\n path_final_images + \"Rachel/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Rachel2/\" + str(k) + \".png\",\n path_final_images + \"Rachel2/\" + str(k) + \".png\")\n copyfile(path_labels + str(k) + \".xml\",\n path_final_labels + str(k) + \".xml\")\"\"\"\n print(k)", "def _crop_write_image(self, inroot, images, outroot):\n for image in images:\n inimage_path = osp.join(inroot, image)\n cvimg = cv2.imread(inimage_path)\n cvimg = cvimg[60:-30, 25:-25]\n h, w, _ = cvimg.shape\n assert h == w == 128\n outimage_path = osp.join(outroot, image)\n cv2.imwrite(outimage_path, cvimg)\n print(outimage_path)", "def update_destination_file_name (file_name):\n\tglobal COUNTER \n\tCOUNTER += 1\n\tsplitted = file_name.split('/')\n\treturn file_name[:len(file_name)-len(splitted[-1])] + 'Image%05d' % COUNTER +'_'+splitted[-1]", "def 
jarvis(input_path, output_path): \n\n if not os.path.exists(f'{output_path}'):\n os.makedirs(f'{output_path}')\n\n file_list = [filename for filename in os.listdir(f'{input_path}') if '.tif' in filename]\n\n for filename in file_list:\n pathname = os.path.join(input_path, filename)\n new_name = f\"{output_path}{filename.replace('.lif - ', '_').replace('_5x-', '_')}\"\n copyfile(pathname, new_name)\n logger.info(f'{new_name}')", "def create_intermediate_files():\n\n dirs = (DIR_PAGE, DIR_SRGB, DIR_VTI, DIR_TIFF,\n DIR_BACK, DIR_TEXT, DIR_FINAL)\n \n for dir in dirs:\n try:\n os.mkdir(os.path.join(cwd, dir))\n except OSError, e:\n print 'directory (', dir, ') already exists'", "def group_image(directory, image, group):\r\n\tif os.path.exists(directory + \"\\\\\" + group):\r\n\t\tpass\r\n\telse:\r\n\t\ttry:\r\n\t\t\tos.mkdir(directory + '\\\\' + group)\r\n\t\t\tprint(\"Successfully created directory\", group)\r\n\t\texcept OSError:\r\n\t\t\tprint(\"Creation of directory failed.\")\r\n\ttry:\r\n\t\tshutil.copy(str(directory + '\\\\' + image), str(directory + \"\\\\\" + group + \"\\\\\" + image))\r\n\texcept OSError as OSe:\r\n\t\tprint(OSe)", "def move_images_and_list(path, final_path):\n #Lists all created folders\n directories = os.listdir(path)\n #Array that stores the path to each image\n lists = []\n #This variable will be used to give a unique name to each image\n tot_images = 0\n #Creates the path where will be stored all files\n if not os.path.exists(final_path):\n os.mkdir(final_path)\n #Iterates over each folder\n for ph in directories:\n #Iterates over each line of the generated file images.lst\n for img in open(os.path.join(path, ph, \"images.lst\")).readlines():\n \"\"\"Images are stored with a name, how many objects have and\n where it is, like this '01_0252_0067_0139_0222.jpg 1 252 67 139 222'\n so these five lines under changes the first part before '_', because\n in some cases, the command opencv_createsamples creates a same name\n to different positive images, this ensures a different name to each\n image\"\"\"\n split_space = img.split()\n split_underscore = split_space[0].split(\"_\")\n split_underscore[0] = str(tot_images)\n join_underscore = \"_\".join(split_underscore)\n join_space = \" \".join([join_underscore, *split_space[1:]])\n #Appends the new image's name to the list\n lists.append(join_space)\n #Moves each image in the folder to the final path, with a new name\n move(os.path.join(path, ph, split_space[0]),\n os.path.join(final_path, join_space.split()[0]))\n tot_images += 1\n #Writes a file withe the name of all images in the folder\n with open(os.path.join(final_path, \"images.lst\"), \"w+\") as f:\n for i in lists:\n f.write(\"\".join([i, '\\n']))\n #Removes the temporary path\n rmtree(os.path.abspath(path))\n #Name of the created file\n return \"images.lst\"", "def make_diff(file_before, file_after, file_output_name):\n if os.path.exists(file_output_name):\n shutil.rmtree(file_output_name)\n os.mkdir(file_output_name)\n psd_diff = diff(file_before, file_after)\n diff_content = {}\n for attr in [\"header\", \"layer\"]:\n diff_content[attr] = getattr(psd_diff, attr)\n with open(os.path.join(file_output_name, \"diff.json\"), \"w\") as diff_file:\n json.dump(diff_content, diff_file, indent=4)\n saved_files = []\n for layer_id in psd_diff.layer.keys():\n if len(psd_diff.layer_image[layer_id]) > 1:\n output_image = os.path.join(file_output_name, layer_id)\n psd_diff.layer_image[layer_id][\"before\"].save(output_image + \".before.png\")\n 
psd_diff.layer_image[layer_id][\"after\"].save(output_image + \".after.png\")\n diff_image_before = Image.new(\"RGBA\", psd_diff.layer_image[layer_id][\"before\"].size)\n diff_image_before_data = diff_image_before.load()\n diff_image_after = Image.new(\"RGBA\", psd_diff.layer_image[layer_id][\"after\"].size)\n diff_image_after_data = diff_image_after.load()\n width, height = diff_image_before.size\n pixel_index = 1\n for y in xrange(height):\n for x in xrange(width):\n if str(pixel_index) in diff_content[\"layer\"][layer_id][\"pixel\"]:\n diff_image_before_data[x, y] = tuple(diff_content[\"layer\"][layer_id][\"pixel\"][str(pixel_index)][\"before\"])\n diff_image_after_data[x, y] = tuple(diff_content[\"layer\"][layer_id][\"pixel\"][str(pixel_index)][\"after\"])\n else:\n diff_image_before_data[x, y] = (0, 0, 0, 0)\n diff_image_after_data[x, y] = (0, 0, 0, 0)\n pixel_index += 1\n diff_image_before.save(output_image + \".before.diff.png\", \"PNG\")\n diff_image_after.save(output_image + \".after.diff.png\", \"PNG\")\n saved_files.append(output_image + \".before.png\")\n saved_files.append(output_image + \".before.diff.png\")\n saved_files.append(output_image + \".after.diff.png\")\n saved_files.append(output_image + \".after.png\")\n saved_files.append(file_output_name + \"/diff.json\")\n return saved_files", "def handle_image(name):\n from_path = args.from_dir + name\n to_path = args.to_dir + name\n\n if width != args.width:\n subprocess.call('jpegtran -rotate 90 -grayscale ' + from_path + ' > ' \\\n + to_path, shell=True)\n else:\n subprocess.call('jpegtran -grayscale ' + from_path + ' > ' + to_path,\\\n shell=True)", "def set_image_out_path(self, image_path):\n i = image_path.rfind('/')\n self.image_out_path = image_path[:i+1] + \"out/\" + image_path[i+1:]", "def output_diff_local_files(gold_session, image_name):\n given_file = gold_session.GetGivenImageLink(image_name)\n closest_file = gold_session.GetClosestImageLink(image_name)\n diff_file = gold_session.GetDiffImageLink(image_name)\n failure_message = 'Unable to retrieve link'\n logging.error('Generated image: %s', given_file or failure_message)\n logging.error('Closest image: %s', closest_file or failure_message)\n logging.error('Diff image: %s', diff_file or failure_message)", "def merge_folders():\r\n from shutil import copyfile\r\n # Merge all folders into main folder\r\n grp_img_dir = os.listdir('Group_Test_Images')\r\n \r\n for grp_img_folder in grp_img_dir:\r\n image_folders = os.listdir('Group_Test_Images'+'/'+grp_img_folder)\r\n \r\n for img_label in image_folders:\r\n new_directory = 'Group_Test_Images'+'/'+img_label\r\n \r\n try:\r\n os.makedirs(new_directory)\r\n except FileExistsError:\r\n # directory already exists\r\n pass\r\n \r\n file_names = os.listdir('Group_Test_Images'+'/'+grp_img_folder+'/'+img_label)\r\n \r\n for file in file_names:\r\n copyfile('Group_Test_Images'+'/'+grp_img_folder+'/'+img_label+'/'+file, new_directory+'/'+file)", "def create_output_directory_for_resized_images():\n\n try:\n if not os.path.isdir(RESIZED_NEGATIVE_PATH):\n return os.makedirs(RESIZED_NEGATIVE_PATH)\n elif not os.path.isdir(RESIZED_POSITIVE_PATH):\n return os.makedirs(RESIZED_POSITIVE_PATH)\n except OSError as e:\n print('Error --> {}'.format(e))", "def moveFiles(rootDir):\n\n homedir = os.environ['HOME']\n albumDirec = 'AlbumCoverImages'\n #Check if a directory exists\n if not os.path.isdir(os.path.join(homedir, 'Pictures', albumDirec)):\n print('AlbumCoverImages not found, trying to make...')\n os.makedirs(os.path.join(homedir, 
'Pictures', albumDirec))\n \n for root, dirs, files in os.walk(rootDir, topdown=False):\n #print('testtest')\n for name in files:\n \n\n #Find image files, and move them to albumCoverImages\n #For some bullshit reason or statments won't work here, have to\n # parse this out to elif statements, ughhhh...\n \n if '.jpg' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec)))\n \n elif '.png' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec, name)))\n \n elif '.gif' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec, name)))\n \n elif '.pdf' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec, name)))\n\n else:\n try:\n #Use tinytag to get file metadata\n tag = TinyTag.get(os.path.join(root, name))\n artistName = tag.artist\n albumName = tag.album\n \n #TODO: Need to add more conditions\n if isinstance(artistName, str):\n artistName = artistName.replace('/', '_')\n\n elif isinstance(albumName, str):\n albumName.replace('/', '_')\n \n\n #Check if the artists directory exists, if not make it\n try:\n if not os.path.isdir(os.path.join(rootDir, artistName)):\n os.makedirs(os.path.join(rootDir, artistName))\n print('{0} directory made!'.format(artistName))\n \n except ValueError:\n print('ValueError with {0}'.format(root+'/'+name))\n continue\n\n except TypeError:\n print('TypeError with {0}'.format(root+'/'+name))\n continue\n\n #Check if the songs album exists, if not make it\n try:\n if not os.path.isdir(os.path.join(rootDir, artistName, albumName)):\n os.makedirs(os.path.join(rootDir, artistName, albumName))\n print('{0} directory made!'.format(albumName))\n \n except TypeError:\n print('TypeError with {0}! Look at album directory making.'.format(root+'/'+name))\n continue\n\n #TODO: Check if album is in artist direc, if not, move it\n\n #Check if song is in album, if not move it \n try:\n if os.path.isfile(os.path.join(rootDir, artistName, albumName, name)) == False:\n os.rename(os.path.join(root, name), os.path.join(rootDir, artistName, albumName, name))\n print('{0} moved to {1}!'.format(name, albumName))\n \n except TypeError:\n print('TypeError with file {0}! 
Look at line song moving'.format(root+'/'+name))\n continue\n \n #TODO: Check if this part works\n except LookupError:\n if (\".jpg\") or (\".png\") or (\".7z\") or (\"README\") or (\".zip\") in name:\n continue\n \n else:\n print('No reader support for {0}'.format(name))\n continue", "def test_warp():\n image_path = \"./data/test/nifti/unit_test/moving_image.nii.gz\"\n ddf_path = \"./data/test/nifti/unit_test/ddf.nii.gz\"\n\n # custom output path with correct suffix\n out_path = \"logs/test_warp/out.nii.gz\"\n warp(image_path=image_path, ddf_path=ddf_path, out_path=out_path)\n assert os.path.isfile(out_path)\n shutil.rmtree(os.path.dirname(out_path))\n\n # custom output path without correct suffix\n out_path = \"logs/test_warp/out.h5\"\n warp(image_path=image_path, ddf_path=ddf_path, out_path=out_path)\n out_path = \"logs/test_warp/warped.nii.gz\"\n assert os.path.isfile(out_path)\n shutil.rmtree(os.path.dirname(out_path))\n\n # custom output path without correct suffix\n out_path = \"logs/test_warp/\"\n warp(image_path=image_path, ddf_path=ddf_path, out_path=out_path)\n out_path = \"logs/test_warp/warped.nii.gz\"\n assert os.path.isfile(out_path)\n shutil.rmtree(os.path.dirname(out_path))\n\n # custom output path without correct suffix\n out_path = \"logs/test_warp\"\n warp(image_path=image_path, ddf_path=ddf_path, out_path=out_path)\n out_path = \"logs/warped.nii.gz\"\n assert os.path.isfile(out_path)\n shutil.rmtree(os.path.dirname(out_path))\n\n # custom output path\n out_path = \"warped.nii.gz\"\n warp(image_path=image_path, ddf_path=ddf_path, out_path=\"\")\n assert os.path.isfile(out_path)\n os.remove(out_path)", "async def save_url_images(images):\n for source, image in images:\n name = source.split('/')[-1]\n async with aiofiles.open(f'{OUTPUT_FOLDER}/{name}', 'wb') as f:\n await f.write(image)", "def make_image_dir(to_path, filenames):\n image_dir = os.path.join(to_path, \"image_2\")\n os.makedirs(image_dir)\n for f in filenames:\n image_file = os.path.join(image_dir, f + \".png\")\n os.system(\"cp sample.png {}\".format(image_file))", "def output_files(filepath):\n\n infile = open(filepath, 'r')\n lines = infile.readlines()\n\n rel_path = './'\n rel_path += lines[6][lines[6].find(':')+1:].strip()\n rel_path += lines[7][lines[7].find(':')+1:].strip()\n\n filename_I1 = lines[9][lines[9].find(':')+1:].strip()\n filename_I2 = lines[10][lines[10].find(':')+1:].strip()\n filename_IW = lines[12][lines[12].find(':')+1:].strip()\n filename_WE = lines[13][lines[13].find(':')+1:].strip()\n filename_CFLx = lines[15][lines[15].find(':')+1:].strip()\n filename_CFLv = lines[16][lines[16].find(':')+1:].strip()\n filename_S = lines[18][lines[18].find(':')+1:].strip()\n\n filepath_I1 = rel_path + filename_I1\n filepath_I2 = rel_path + filename_I2\n filepath_IW = rel_path + filename_IW\n filepath_WE = rel_path + filename_WE\n # filepath_CFLx = rel_path + filename_CFLx\n # filepath_CFLv = rel_path + filename_CFLv\n filepath_S = rel_path + filename_S\n\n outfile_I1 = open(filepath_I1, 'w')\n outfile_I2 = open(filepath_I2, 'w')\n outfile_IW = open(filepath_IW, 'w')\n outfile_WE = open(filepath_WE, 'w')\n # outfile_CFLx = open(filepath_CFLx, 'w')\n # outfile_CFLv = open(filepath_CFLv, 'w')\n outfile_S = open(filepath_S, 'w')\n\n outfiles = dict(I1 = outfile_I1,\n I2 = outfile_I2,\n IW = outfile_IW,\n WE = outfile_WE,\n # CFLx = outfile_CFLx,\n # CFLv = outfile_CFLv,\n S = outfile_S)\n\n return outfiles", "def output_files(filepath):\n\n infile = open(filepath, 'r')\n lines = infile.readlines()\n\n 
rel_path = './'\n rel_path += lines[6][lines[6].find(':')+1:].strip()\n rel_path += lines[7][lines[7].find(':')+1:].strip()\n\n filename_I1 = lines[9][lines[9].find(':')+1:].strip()\n filename_I2 = lines[10][lines[10].find(':')+1:].strip()\n filename_IW = lines[12][lines[12].find(':')+1:].strip()\n filename_WE = lines[13][lines[13].find(':')+1:].strip()\n filename_CFLx = lines[15][lines[15].find(':')+1:].strip()\n filename_CFLv = lines[16][lines[16].find(':')+1:].strip()\n filename_S = lines[18][lines[18].find(':')+1:].strip()\n\n filepath_I1 = rel_path + filename_I1\n filepath_I2 = rel_path + filename_I2\n filepath_IW = rel_path + filename_IW\n filepath_WE = rel_path + filename_WE\n # filepath_CFLx = rel_path + filename_CFLx\n # filepath_CFLv = rel_path + filename_CFLv\n filepath_S = rel_path + filename_S\n\n outfile_I1 = open(filepath_I1, 'w')\n outfile_I2 = open(filepath_I2, 'w')\n outfile_IW = open(filepath_IW, 'w')\n outfile_WE = open(filepath_WE, 'w')\n # outfile_CFLx = open(filepath_CFLx, 'w')\n # outfile_CFLv = open(filepath_CFLv, 'w')\n outfile_S = open(filepath_S, 'w')\n\n outfiles = dict(I1 = outfile_I1,\n I2 = outfile_I2,\n IW = outfile_IW,\n WE = outfile_WE,\n # CFLx = outfile_CFLx,\n # CFLv = outfile_CFLv,\n S = outfile_S)\n\n return outfiles", "def writeList(image, file, path):\n homepath = os.getcwd()\n\n os.chdir(path)\n\n image = image.rstrip('.fits')\n\n if os.path.exists(file):\n filelist = open(file, 'r').readlines()\n if image+'\\n' in filelist or not filelist:\n f = open(file, 'w')\n f.write(image+'\\n')\n else:\n f = open(file, 'a')\n f.write(image+'\\n')\n else:\n f = open(file, 'a')\n f.write(image+'\\n')\n f.close()\n os.chdir(homepath)\n\n return", "def _temp_analyze_files(tmpdir):\n img_dir = tmpdir.mkdir(\"img\")\n orig_img = img_dir.join(\"orig.img\")\n orig_hdr = img_dir.join(\"orig.hdr\")\n orig_img.open(\"w\")\n orig_hdr.open(\"w\")\n return orig_img.strpath, orig_hdr.strpath", "def crop_and_save(gray, faces):\r\n count = 0\r\n for (x, y, w, h) in faces:\r\n name = int(time.time())\r\n name = str(name)\r\n while os.path.isfile(\"temp/\" + name + \".png\"):\r\n count += 1\r\n name = name + \"+\" + str(count)\r\n cv2.imwrite(os.path.join(\"temp/\" + name + \".png\"), gray[y:y + h, x:x + w])", "def imageSaveOutput(image,name,number):\n FileName = name +\" \"+number\n mpimg.imsave(\"test_images_output\"+'//'+FileName,image)\n return 0;", "def save_unique_image():\r\n global folder_name\r\n filelist = [file for file in os.listdir('temp') if file.endswith('.png')]\r\n\r\n if filelist:\r\n for image_path in filelist:\r\n found = 0\r\n img_to_del = Image.open(\"temp/\" + image_path)\r\n if not get_immediate_subdirectories():\r\n found = 1\r\n os.makedirs('detected_faces/1/')\r\n img_to_del.save('detected_faces/1/'+ image_path)\r\n os.remove(os.path.join(temp_path, image_path))\r\n folder_name = 1\r\n else:\r\n for folder in get_immediate_subdirectories():\r\n folder_filelist = [file for file in os.listdir(\"detected_faces/\" + folder) if\r\n file.endswith('.png')]\r\n count = len(folder_filelist)\r\n file = folder_filelist[0]\r\n img_to_compare = Image.open(\"detected_faces/\" + folder + \"/\" + file)\r\n if img_to_del.size > img_to_compare.size:\r\n temp_image_resized = img_to_del.resize(img_to_compare.size, Image.ANTIALIAS)\r\n index = get_ssim(temp_image_resized, img_to_compare)\r\n elif img_to_del.size < img_to_compare.size:\r\n img_to_compare = img_to_compare.resize(img_to_del.size, Image.ANTIALIAS)\r\n index = get_ssim(img_to_del, img_to_compare)\r\n 
else:\r\n index = get_ssim(img_to_del, img_to_compare)\r\n if index > min_ssim_index_val:\r\n found = 1\r\n if count < 5:\r\n img_to_del.save(pathname + \"/\" + folder + \"/\" + image_path)\r\n print image_path\r\n if os.path.isfile(os.path.join(temp_path, image_path)):\r\n os.remove(os.path.join(temp_path, image_path))\r\n if found == 0:\r\n folder_name += 1\r\n os.makedirs('detected_faces/' + str(folder_name))\r\n img_to_del.save(pathname + \"/\" + str(folder_name) + \"/\" + image_path)\r\n if os.path.isfile(os.path.join(temp_path, image_path)):\r\n os.remove(os.path.join(temp_path, image_path))", "def determine_output_ending():\n file_found = False\n idx = 1\n while not file_found:\n if not os.path.isfile(LOG_DIR + \"/output%04d.png\" % (idx)):\n return \"%04d\" % (idx)\n idx += 1", "def write_output_data_to_disk(\n output_data_dict,\n output_directory=\"./\",\n output_file_suffix=\".nii.gz\",\n overwrite_existing_files=False,\n):\n if output_data_dict is None:\n return\n\n filename_fields = [i for i in output_data_dict.keys() if i != \"parent_sorting_data\"]\n parent_sorting_data = output_data_dict[\"parent_sorting_data\"]\n\n files_written = {}\n\n \"\"\"\n Write the the converted images to disk\n\n ! CONSIDER\n We could simply write as we go?\n Pro: save memory, important if processing very large files\n Con: Reading as we go allows proper indexing\n\n \"\"\"\n\n for field in filename_fields:\n logger.info(\" Writing files for field: %s\", field)\n p = pathlib.Path(output_directory) / parent_sorting_data / field\n p.mkdir(parents=True, exist_ok=True)\n files_written[field] = []\n\n for field_filename_base, field_list in output_data_dict[field].items():\n # Check if there is a list of images with matching names\n # This will depend on the name format chosen\n # If there is a list, we append an index as we write to disk\n\n if isinstance(field_list, (tuple, list)):\n # Flatten\n field_list_flat = list(flatten(field_list))\n\n # Iterate\n for suffix, file_to_write in enumerate(field_list_flat):\n field_filename = field_filename_base + f\"_{suffix}\"\n\n # Some cleaning\n while \"__\" in field_filename:\n field_filename = field_filename.replace(\"__\", \"_\")\n\n while field_filename[-1] == \"_\":\n field_filename = field_filename[:-1]\n\n # Save image!\n output_name = (\n pathlib.Path(output_directory)\n / parent_sorting_data\n / field\n / (field_filename + output_file_suffix)\n )\n files_written[field].append(output_name)\n\n if output_name.is_file():\n logger.warning(\" File exists: %s\", output_name)\n\n if overwrite_existing_files:\n logger.warning(\" You have selected to overwrite existing files.\")\n\n else:\n logger.info(\n \" You have selected to NOT overwrite existing files. Continuing.\"\n )\n continue\n\n sitk.WriteImage(file_to_write, output_name.as_posix())\n\n else:\n field_filename = field_filename_base\n file_to_write = field_list\n\n # Some cleaning\n while \"__\" in field_filename:\n field_filename = field_filename.replace(\"__\", \"_\")\n\n while field_filename[-1] == \"_\":\n field_filename = field_filename[:-1]\n\n # Save image!\n \"\"\"\n ! 
TO DO\n Use pathlib, and perform some checks so we don\"t overwrite anything!\n \"\"\"\n output_name = (\n pathlib.Path(output_directory)\n / parent_sorting_data\n / field\n / (field_filename + output_file_suffix)\n )\n files_written[field].append(output_name)\n\n if output_name.is_file():\n logger.warning(\" File exists: %s\", output_name)\n\n if overwrite_existing_files:\n logger.warning(\" You have selected to overwrite existing files.\")\n\n else:\n logger.info(\n \" You have selected to NOT overwrite existing files. Continuing.\"\n )\n continue\n\n sitk.WriteImage(file_to_write, output_name.as_posix())\n\n return files_written", "def collect_and_rename() -> None:\n image_source_folder = 'image_dir'\n label_source_folder = 'annotation_dir'\n image_target_folder = 'images'\n label_target_folder = 'labels'\n for i, (subdir, _, files) in enumerate(os.walk(image_source_folder), -1):\n # it walks the parent folder first, not a file\n if i == -1: \n continue\n subdir_name = subdir.split('\\\\')[1]\n for file_name in files:\n with open(f'{image_source_folder}/{subdir_name}/{file_name}') as image_file, \\\n open(f'{label_source_folder}/{subdir_name}/{file_name}'.split('.')[0] + '.txt') as label_file:\n shutil.copy2(image_file.name, f'{image_target_folder}/{\"%06d\" % i}.jpg')\n shutil.copy2(label_file.name, f'{label_target_folder}/{\"%06d\" % i}.txt')\n print(f'Processed {i} images')", "def quicksavefile(directory, text, format=\".out\"):\n print(text)\n print(directory)\n directory = directory.split(\".\")\n del directory[-1]\n directory.append(format)\n s = \"\".join(directory)\n file = open(s, \"w\")\n file.write(text)\n file.close()", "def enumerate_filename(path):\n for (dirpath_0, dirname_list, _) in os.walk(path):\n for dirname in tqdm(dirname_list):\n if dirname[0] != '.':\n dirpath_1 = dirpath_0 + '/' + dirname + '/'\n for (dirpath_2, __, filename_list) in os.walk(dirpath_1):\n for filename in filename_list:\n if filename[0] != '.':\n old_file = dirpath_2 + filename\n new_file = dirpath_2 + str(filename_list.index(filename)) + '.jpg'\n if os.path.exists(new_file):\n print ('File %s already exist' % new_file)\n continue\n else:\n try:\n os.rename(old_file, new_file)\n except:\n print('Couldn\\'t rename %s' % old_file)\n continue", "def main(vis_dirs, outdir):\n assert len(vis_dirs) == 4\n\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n\n for i, filename in enumerate(tqdm(os.listdir(vis_dirs[-1]))):\n # if i % 100 == 0:\n # print(i)\n\n files = [os.path.join(vis_dir, filename) for vis_dir in vis_dirs]\n outimg = os.path.join(outdir, filename)\n merge_four_images(files, outimg)\n\n print (\"Finished! 
Result dir is %s\" % outdir)", "def produce_reversed_images(source_dir, save_dir):\n if os.path.exists(source_dir) is False:\n os.makedirs(source_dir)\n if os.path.exists(save_dir) is False:\n os.makedirs(save_dir)\n\n reversed_images_index = 0\n source_img = find_images(source_dir)\n print(\"Reversing the source images...\")\n for s in source_img:\n img, label = read_image_with_label(source_dir, s)\n for d in range(3): # 3 kinds of reversing\n reversed_images_index += 1\n img_new, label_new = reverse_image(img, label, d)\n img_name = str(reversed_images_index) + \"_\" + str(label_new[0]) + \"_\" + str(label_new[1]) + \"_.png\"\n img_new.save(os.path.join(save_dir, img_name))\n if reversed_images_index % 100 == 0:\n print(\"%d images already be produced\" % reversed_images_index)\n print(\"Reverse images reversed: %d\" % reversed_images_index)", "def process_images():\n create_dirs()\n for root, dirs, files in os.walk(IN):\n for name in files:\n if name[0] == '.':\n continue\n process_image(name)", "def image_splitter(foreground, filename, outfolder_random, outfolder_art, the_class):\n imarray = numpy.random.rand(256, 256, 3) * 255\n background = Image.fromarray(imarray.astype('uint8')).convert('RGBA')\n background2 = Art().redraw()\n foreground = foreground.convert(\"RGBA\")\n datas = foreground.getdata()\n\n new_data = []\n for item in datas:\n if item[0] < 10 and item[1] < 10 and item[2] < 10:\n new_data.append((0, 0, 0, 0))\n else:\n new_data.append(item)\n\n foreground.putdata(new_data)\n\n if not os.path.isdir(outfolder_random + \"/\" + the_class):\n os.makedirs(outfolder_random + \"/\" + the_class)\n if not os.path.isdir(outfolder_art + \"/\" + the_class):\n os.makedirs(outfolder_art + \"/\" + the_class)\n\n background.paste(foreground, (0, 0), foreground)\n new_name = filename[:-17]\n background.save(outfolder_random + \"/\" + the_class + \"/\" + new_name + \"jpg\", \"JPEG\")\n\n background2.paste(foreground, (0, 0), foreground)\n background2.save(outfolder_art + \"/\" + the_class + \"/\" + new_name + 'jpg', \"JPEG\")", "def __save_to_dir(self, imagelist, prefix, PATH):\n for pair in imagelist:\n directory = os.path.join(PATH, pair[1])\n if not os.path.exists(directory):\n os.mkdir(directory)\n filename = prefix + pair[2]\n pair[0].save(os.path.join(directory, filename))\n print(\"Saved \" + os.path.join(directory, filename))", "def create_and_write_output(predictions_path,output_path,inpDir):\n \n filenames= sorted(os.listdir(predictions_path)) \n for filename in filenames:\n \n # read the 3 channel output image from the neural network\n image=cv2.imread(os.path.join(predictions_path,filename))\n \n # create binary image output using the create_binary function\n out_image=create_binary(image) \n \n # read and store the metadata from the input image\n with BioReader(os.path.join(inpDir,filename)) as br:\n metadata = br.metadata\n\n # Write the binary output consisting of the metadata using bfio.\n output_image_5channel=np.zeros((out_image.shape[0],out_image.shape[1],1,1,1),dtype=np.uint8)\n output_image_5channel[:,:,0,0,0]=out_image \n\n with BioWriter(os.path.join(output_path,filename), metadata=metadata) as bw:\n bw.dtype = output_image_5channel.dtype\n bw.write(output_image_5channel)", "def jarvis(input_path, output_path): \n \n if not os.path.exists(f'{output_path}'):\n os.makedirs(f'{output_path}')\n\n folder_list = [sample for sample in os.listdir(input_path) if os.path.isdir(f'{input_path}{sample}')]\n\n for folder in folder_list:\n\n file_list = [filename for filename in 
os.listdir(f'{input_path}{folder}/') if '.tif' in filename]\n mutant = '_'.join(folder.split(' '))\n\n for x, filename in enumerate(file_list):\n pathname = os.path.join(input_path, folder, filename)\n new_name = f'{output_path}{mutant}_{x}.tif'\n copyfile(pathname, new_name)\n # array_stack = skimage.io.imread(f'{pathname}').transpose(1, 2, 0)\n logger.info(f'{new_name}')", "def get_normal_image(image_path):\n resized_images = slice_and_resize(image_path)\n\n normal_full_img = join_images_horizontally(resized_images)\n\n folder = \"static/images/panorama\"\n\n name = next(tempfile._get_candidate_names())\n normal_path = \"%s/%s_resized.png\" % (folder, name)\n normal_full_img.save(normal_path)\n\n return normal_path", "def appendTrailer(inputImagePath, trailer, outputPath):\n\n with open(inputImagePath, \"rb\") as f:\n inputContent = f.read()\n\n with open(outputPath, \"wb\") as f:\n f.write(inputContent)\n f.write(trailer)", "def create_temp_files(containers):\n for name in containers:\n run_cmd(f\"rm -rf /tmp/{name}.img\", True)\n for name in containers:\n run_cmd(f\"truncate -s 1G /tmp/{name}.img\", True)", "def split(directory='', name=''):\n d = directory\n r_path = build_path(d, path.splitext(name)[0] + '_r.png')\n g_path = build_path(d, path.splitext(name)[0] + '_g.png')\n b_path = build_path(d, path.splitext(name)[0] + '_b.png')\n a_path = build_path(d, path.splitext(name)[0] + '_a.png')\n Image.open(build_path(d, name)).convert('RGBA').getchannel(0).save(r_path)\n Image.open(build_path(d, name)).convert('RGBA').getchannel(1).save(g_path)\n Image.open(build_path(d, name)).convert('RGBA').getchannel(2).save(b_path)\n Image.open(build_path(d, name)).convert('RGBA').getchannel(3).save(a_path)", "def _handle_old_style_images(staging_path):\n file_num = 0\n for filename in ('snap.vhd', 'image.vhd', 'base.vhd'):\n path = os.path.join(staging_path, filename)\n if os.path.exists(path):\n _rename(path, os.path.join(staging_path, \"%d.vhd\" % file_num))\n file_num += 1", "def download_images(src_dir, dest_dir):\n # +++your code here+++\n if not os.path.exists(dest_dir):\n os.mkdir(dest_dir)\n res=utility(src_dir)\n k=0\n f=file(dest_dir+\"/\"+\"index.html\", 'w')\n f.write(\"<html><body>\")\n for i in res:\n local_name='image'+str(k)\n print \"downloading image%d\" %(k)\n urllib.urlretrieve(i, os.path.join(dest_dir, local_name))\n f.write(\"<img src=\"+'\"'+os.path.join(dest_dir, local_name)+'\"'+\">\")\n k+=1\n f.write(\"</body></html>\")\n f.close()\n cmd=\"xdg-open\"+\" \"+'\"'+dest_dir+\"/\"+\"index.html\"+'\"'\n (status, output)=commands.getstatusoutput(cmd)\n sys.exit(1)", "def write_tmp_blob(dir, name, sha):\n cmd = ['git', 'cat-file', '-p', sha ]\n abs_path = os.path.join(dir, os.path.basename(name))\n popen = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n output = popen.communicate()[0]\n f = file(abs_path, 'w')\n f.write(output)\n f.close()\n return abs_path", "def copy_images_to_new(lab_img, from_dir, to_dir):\n \n for img in lab_img:\n if not os.path.exists(join(todir, img)):\n shutil.copyfile(join(fromdir, img), join(todir, img)) \n print(\"Done\")", "def TextureFiles():\n import shutil\n\n # first convert the .psd files to .png\n\n FbmDir = glo.outputFolder + '.fbm'\n\n for d1, d2, filenames in os.walk(FbmDir):\n for filename in filenames:\n \"\"\"filename: vitrin_diffuse.psd\n \"\"\"\n # print \"TextureFiles():\", filename\n if filename[-4:].upper() == '.PSD':\n #print \" -- FbmDir:\" , FbmDir\n #print \" -- in the if clause with filename:\" , filename\n #print \" -- 
glo.outputFolder\" , glo.outputFolder\n # FbmDir = '../fbx/simplelifeembedmedia.fbm'\n # filename = 'shelves_light.PSD'\n PsdToPngConverter(FbmDir, filename)\n\n # Move only the .png file to the ../png/ directory\n filename = filename[:-4] + '.png'\n src = os.path.join(FbmDir, filename)\n elif filename[0] != '.':\n src = os.path.join(FbmDir, filename)\n pass\n\n shutil.copy(src, glo.outputFolder)\n print os.path.join(glo.outputFolder, filename), \"\\n\"\n sys.stdout.flush()\n # for d1, d2, files in os.walk(glo.outputFolder):\n # if not filename in files:\n # #print \"moving: \", files, filename, not filename in files\n # shutil.copy(src, glo.outputFolder)\n # print os.path.join(glo.outputFolder, filename), \"\\n\"\n # else:\n # print \"%s/%s already exists. File not moved\" % (glo.outputFolder,filename)", "def photo2web_process_hattenbach():\n\n os.chdir('/Volumes/SSD External/Hattenbach_v2')\n \n dir_base = os.getcwd()\n \n dir_p2w = '/Users/throop/photos/Trips/'\n \n dirs = sorted(glob.glob(os.path.join(dir_base, '*')))\n \n quality_out = '60'\n size_out = '2000x2000'\n \n for i,dir in enumerate(dirs):\n if os.path.isdir(dir):\n os.chdir(dir)\n dir_originals = os.path.join(dir, 'originals')\n dir_originals_fullres = os.path.join(dir, 'originals_fullres')\n\n# For HH files, copy the 'actual' originals into a 'fullres' folder, for safekeeping\n\n if not os.path.isdir(dir_originals_fullres):\n os.rename(dir_originals, dir_originals_fullres)\n os.mkdir(dir_originals)\n \n files = glob.glob(os.path.join(dir_originals_fullres, '*'))\n\n# Get a list of all the images\n\n# For each image, make a low-res, low-quality image. This is just because the scanned files\n# are huge and high-quality, and not useful for online. They are much larger than necessary. \n# So we use 'convert' to shrink them in size and quality, and put the output into 'originals' directory \n# for photo2web.\n\n for file in files:\n file_short = os.path.basename(file)\n file_in = os.path.join(dir_originals_fullres,file_short)\n file_out = os.path.join(dir_originals,file_short)\n if not os.path.isfile(file_out):\n cmd = (f'convert -resize {size_out} -quality {quality_out}' +\n f' {file_in}' +\n f' {file_out}')\n print(f'{cmd}')\n \n subprocess.run(['convert', '-resize', size_out, '-quality', quality_out,\n file_in,\n file_out])\n\n# Now, finally, go thru and do photo2web on all of them.\n \n print(f'\\nProcessing directory {i}/{len(dirs)} {dir}\\n')\n subprocess.run(['cp', '-r', os.path.join(dir_p2w, 'header.txt'), '.'])\n subprocess.run(['cp', '-r', os.path.join(dir_p2w, 'photos.css'), '.'])\n if not os.path.exists('captions.txt'):\n subprocess.run(['captions_photo2web']) \n subprocess.run(['photo2web_old'])\n subprocess.run(['photo2web'])", "def save(images, output):\n for image, frame in images:\n image.save(output(frame))", "def compress_image(filename,k):", "def convert_for_submission(source_dir, target_dir):\r\n files = subfiles(source_dir, suffix=\".nii.gz\", join=False)\r\n maybe_mkdir_p(target_dir)\r\n for f in files:\r\n img = sitk.ReadImage(join(source_dir, f))\r\n out_file = join(target_dir, f[:-7] + \".nii\")\r\n sitk.WriteImage(img, out_file)", "def gen_text (path_img):\n \n try:\n image_files = []\n os.chdir(path_img)\n for filename in os.listdir(os.getcwd()):\n if filename.endswith(\".JPG\"):\n image_files.append(path_img + filename)\n with open(\"images.txt\", \"w\") as outfile:\n for image in image_files:\n outfile.write(image)\n outfile.write(\"\\n\")\n outfile.close()\n \n except KeyboardInterrupt:\n 
print('Interrupted')\n try:\n os.system.exit(0)\n except SystemExit:\n os._exit(0)", "def tiff_split(path_tiff_source, path_tiff_out):\n path_out_low = path_tiff_out + '\\\\' + 'low8bits'\n path_out_hig = path_tiff_out + '\\\\' + 'high8bits'\n path_out_list=[path_out_hig,path_out_low]\n makdir(path_out_low)\n makdir(path_out_hig)\n for file in glob.glob(path_tiff_source + '\\\\' + '*.tiff'):\n tif = TIFF.open(file, 'r')\n img = tif.read_image()\n img_low_8bits = img % 256\n img_hig_8bits = img / 256\n img_low_8bits = img_low_8bits.astype(np.uint8)\n img_hig_8bits = img_hig_8bits.astype(np.uint8)\n img_name = os.path.basename(file)\n tif_split_low = TIFF.open(path_out_low + '\\\\' + img_name, 'w')\n tif_split_low.write_image(img_low_8bits)\n tif_split_hig = TIFF.open(path_out_hig + '\\\\' + img_name, 'w')\n tif_split_hig.write_image(img_hig_8bits)\n return path_out_list", "def append_component_images(pldm_fw_up_pkg, image_files):\n for image in image_files:\n with open(image, \"rb\") as file:\n for line in file:\n pldm_fw_up_pkg.write(line)", "def process_directory(working_directory, cc_size, output_directory):\n print \"\\nProcessing directory {0}\".format(working_directory)\n \n for dirpath, dirnames, filenames in os.walk(working_directory):\n for f in filenames:\n if f.split('.')[-1] == 'tif':\n img = load_image(os.path.join(dirpath, f))\n onebitimage = img.to_onebit()\n onebitimage.despeckle(int(cc_size))\n output_path = os.path.join(output_directory, f)\n # print onebitimage\n # print (os.path.join(dirpath, f.split('.')[0]+ '_NEW.' + f.split('.')[-1]))\n # onebitimage.save_tiff(os.path.join(dirpath, f.split('.')[0]+ '_NEW.' + f.split('.')[-1]))\n\n onebitimage.save_tiff(output_path)\n print output_path\n else:\n pass", "def copy_mosaic(mosaic_dir='K:/IID_SaltonSea/Tasks/Soil mapping/PhotoDocumentation/Original/',\r\n output_dir='K:/IID_SaltonSea/Tasks/Soil mapping/PhotoDocumentation/Processing/',\r\n file_pattern='IID201905*jpg', replace=False): \r\n \r\n if not os.path.exists(mosaic_dir):\r\n sys.exit('input folder does not exist')\r\n \r\n mosaics = []\r\n for root, dirnames, filenames in os.walk(mosaic_dir):\r\n for filename in fnmatch.filter(filenames, file_pattern):\r\n mosaics.append(os.path.join(root, filename))\r\n \r\n c = 0\r\n s = 0\r\n r = 0\r\n for m in mosaics:\r\n f = output_dir + os.path.basename(m)\r\n if not os.path.exists(f):\r\n copyfile(m, f)\r\n print('copied: %s' % f)\r\n c+=1\r\n elif replace:\r\n copyfile(m, f)\r\n print('replaced: %s' % f)\r\n r+=1\r\n else:\r\n print('skipped: %s' % f)\r\n s+=1\r\n \r\n print('copied total of %i files' % c)\r\n print('replaced total of %i files' % r)\r\n print('skipped total of %i files' % s)", "def divide_fasta_like_file(input_file, output_dir, ext=''):\n with open(input_file, 'r') as file:\n body = ''\n p_id = ''\n for line in file:\n if line[0] == '>':\n if len(p_id) > 0:\n with open(output_dir + p_id.replace(':', '_') + '.' + ext, \"w\") as out_file:\n out_file.write('>' + p_id.replace(':', '_') + '\\n' + body + '\\n')\n body = ''\n p_id = line.strip()[1:]\n else:\n body += line.strip()\n with open(output_dir + p_id.replace(':', '_') + '.' 
+ ext, \"w\") as out_file:\n out_file.write('>' + p_id.replace(':', '_') + '\\n' + body + '\\n')", "def pepsi(directory=None):\r\n \r\n if directory == None:\r\n directory = os.getcwd() # Use working directory if unspecified\r\n \r\n # Create a new directory 'modified'\r\n new_directory = os.path.join(directory, 'modified')\r\n try:\r\n os.mkdir(new_directory)\r\n except OSError:\r\n pass # if the directory already exists, proceed \r\n \r\n #load all the images\r\n image_list, file_list = get_images(directory) \r\n\r\n #go through the images and save modified versions\r\n red = PIL.Image.open(os.path.join(directory, 'red.png'))\r\n blue = PIL.Image.open(os.path.join(directory, 'blue.png'))\r\n template =PIL.Image.open(os.path.join(directory, 'template.png'))\r\n topp = PIL.Image.open(os.path.join(directory, '1.jpeg'))\r\n bottomm = PIL.Image.open(os.path.join(directory, '2.jpg'))\r\n \r\n # Round the corners with radius = 30% of short side\r\n\r\n top = redlogo(topp,red,template)\r\n bottom = bluelogo(bottomm,blue,template)\r\n new_image = template\r\n new_image.paste(bottom,(0,0), mask=bottom)\r\n new_image.paste(top,(0,0), mask=top)\r\n #save the altered image, suing PNG to retain transparency\r\n new_image_filename = os.path.join(new_directory, 'final' + '.png')\r\n new_image.save(new_image_filename) #9b: ", "def create_noobj_folder(\n folder: PathLike, \n img_ext: str = \".jpg\",\n):\n folder = Path(folder).expanduser().resolve()\n images = glob(folder, img_ext)\n \n for image in images:\n filename = image.name\n _folder = image.parent.name\n path = folder / (image.stem + \".xml\")\n img_w, img_h = get_image_size(image)\n\n tree = ET.Element(\"annotation\")\n\n et_folder = ET.SubElement(tree, \"folder\")\n et_folder.text = _folder\n\n et_filename = ET.SubElement(tree, \"filename\")\n et_filename.text = filename\n\n et_path = ET.SubElement(tree, \"path\")\n et_path.text = str(path)\n\n et_img_size = ET.SubElement(tree, \"size\")\n ET.SubElement(et_img_size, \"width\").text = str(img_w)\n ET.SubElement(et_img_size, \"height\").text = str(img_h)\n ET.SubElement(et_img_size, \"depth\").text = \"3\"\n\n content = ET.tostring(tree, encoding=\"unicode\", pretty_print=True)\n try: \n path.write_text(content)\n except KeyboardInterrupt:\n path.write_text(content)\n exit()", "def add_image(self, f_name,file,new_id):\r\n folder=tempfile.mktemp()\r\n os.mkdir(folder)\r\n datei=open(folder+'/'+f_name,'w+')\r\n datei.write(file.read())\r\n datei.close()\r\n val='' \r\n liste_ext=liste_val\r\n if(self.toolbox.hasProperty('eigene_formate')):\r\n self_val=self.toolbox.getProperty('eigene_formate').split(',')\r\n liste_ext=[]\r\n for x in self_val:\r\n liste_ext.append('_'+x+'.jpeg')\r\n for extension in liste_ext:\r\n #cmd='/usr/bin/convert '+folder+'/'+f_name+' -resize '+extension[1:-4]+'x'+extension[1:-4]+' '+folder+'/'+new_id+extension\r\n cmd='/usr/bin/convert '+folder+'/'+f_name+' -resize '+extension[1:-4]+' '+folder+'/'+new_id+extension\r\n order=os.popen(cmd).read()\r\n kurz_name='_'+str(f_name.split('.')[0])\r\n kurz_name=kurz_name.replace(' ','_')\r\n val=val+self.manage_addImage(id=new_id+kurz_name+extension,file=open(folder+'/'+new_id+extension),title=f_name, precondition='', content_type='',REQUEST=None)+' ' \r\n os.remove(folder+'/'+new_id+extension)\r\n os.remove(folder+'/'+f_name)\r\n os.rmdir(folder)\r\n txt=\"Datei Hochgeladen!<br>\"\r\n #my_root=self.toolbox\r\n #txt+=my_root.id+\"<br>\"\r\n #if(my_root.hasProperty('eigene_formate')):\r\n # 
txt+=my_root.getProperty('eigene_formate')+\"<br>\"\r\n return txt", "def ren_mosaic(mosaic_dir='K:/IID_SaltonSea/Tasks/Soil mapping/PhotoDocumentation/Original/', \r\n file_pattern='*stitch.jpg'): \r\n \r\n \r\n if not os.path.exists(mosaic_dir):\r\n sys.exit('input folder does not exist')\r\n \r\n mosaics = []\r\n for root, dirnames, filenames in os.walk(mosaic_dir):\r\n for filename in fnmatch.filter(filenames, file_pattern):\r\n mosaics.append(os.path.join(root, filename).replace('\\\\','/'))\r\n \r\n s = 0\r\n r = 0\r\n for m in mosaics:\r\n dir_name = os.path.dirname(m).split('/')[-1]\r\n new_name = os.path.dirname(m) + '/' + dir_name + '.jpg'\r\n if os.path.exists(new_name):\r\n print('skipping: %s' % m)\r\n s+=1\r\n else:\r\n os.rename(m, new_name)\r\n print('renamed: %s' % new_name)\r\n r+=1\r\n \r\n print('renamed total of %i files' % r)\r\n print('skipped total of %i files' % s)", "def move_files(probs):\r\n path = '../brain_tiny_dataset_class/png/'\r\n for _, _, files in os.walk(path):\r\n for file in files:\r\n # Reads the ID\r\n id = file[3:-4]\r\n try:\r\n # Reads dictionary of probabilities\r\n result = probs[id]\r\n # Moves pictures in 2 folders\r\n if result['epidural'] > 0 or result['intraparenchymal'] > 0 \\\r\n or result['intraventricular'] > 0 or result['subarachnoid'] > 0 \\\r\n or result['subdural'] > 0:\r\n shutil.move(path + file, '../brain_tiny_dataset_class/hemorrhage/' + file)\r\n else:\r\n shutil.move(path + file, '../brain_tiny_dataset_class/healthy/' + file)\r\n except KeyError:\r\n continue", "def JPGtoPNGConverter(source, dest):\n files = os.listdir(f\"./{source}\")\n if not os.path.exists(f\"./{dest}\"):os.makedirs(f\"./{dest}\")\n\n for file in files:\n if os.path.splitext(file)[-1] == \".jpg\":\n img = Image.open(f\"./{source}/{file}\")\n clean_text = os.path.splitext(file)[0]\n img.save(f\"./{dest}/{clean_text}.png\",\"png\")\n else:\n print(f\"Your filename: {file} is not in .JPG format !!\")\n return \"All files converted successfully :) \"", "def write_camera_pose_to_file(camera_pose_abs_dict: dict, pose_dir_path: str) -> None:\n image_dst = path.join(pose_dir_path, 'images.txt')\n with open(image_dst, 'w+') as file:\n file.write('# Image list with two lines of data per image:\\n')\n file.write('# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\\n')\n file.write('# POINTS2D[] as (X, Y, POINT3D_ID)\\n')\n file.write(f'# Number of images: {len(camera_pose_abs_dict.keys())}\\n')\n\n # write each camera pose to file\n for image in camera_pose_abs_dict.keys():\n image_pose_data = []\n t_vector = camera_pose_abs_dict[image][1]\n qx, qy, qz, qw = rotation_matrix_to_quaternion(camera_pose_abs_dict[image][0])\n\n image_pose_data.append(str(image))\n # image_pose_data.append(f'{qw} {qx} {qy} {qz}')\n image_pose_data.append(f'{qz} {qy} {qx} {qw}')\n image_pose_data.append(' '.join(map(str, t_vector)))\n image_pose_data.append('1')\n image_pose_data.append(f'image{image}.jpg')\n\n file.write(' '.join(image_pose_data) + '\\n\\n')", "def tempWrite(img):\n\tfilename = \"{}.png\".format(os.getpid())\n\tcv2.imwrite(filename, img)\n\treturn filename", "def remove_extra_images(path_to_images: str, number_of_images: int) -> None:\n last_image = 'image' + str(number_of_images) + '.jpg'\n while last_image in listdir(path_to_images):\n last_image_path = path.join(path_to_images, last_image)\n remove(last_image_path)\n print(f\"remove {last_image}\")\n number_of_images += 1\n last_image = 'image' + str(number_of_images) + '.jpg'", "def recad_dir(pattern, vmin, vmax, 
shape, discard_vol=False,\n only_new=True):\n file_list = glob(pattern)\n file_list.sort()\n for file_name in file_list:\n h5_name = os.path.splitext(file_name)[0] + '.h5'\n print file_name\n if only_new:\n if os.path.exists(h5_name):\n print \"already exists\"\n continue\n recad_to_h5_chunk(file_name, vmin, vmax, shape)\n if discard_vol and os.path.exists(h5_name):\n os.remove(file_name)\n os.remove(file_name + '.info')\n os.remove(file_name + '.xml')", "def do_2003(in_dir, out_dir):\n\n dir_items = setup_outdir_and_get_input(in_dir, out_dir)\n for idx, item in enumerate(dir_items):\n full_path = in_dir + os.path.sep + item\n print(f\"{item} -> {idx}\")\n create_dirs_and_write_files(full_path, idx, in_dir, item, out_dir)", "def order_test_set(path_to_images, path_to_csv, path_to_save_test):\n\n try:\n with open(path_to_csv, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter = ',')\n\n for i, row in enumerate(reader):\n if i == 0: # continue the first line beacuse of the header line\n continue\n\n img_name = row[-1].replace('Test/', '')\n label = row[-2]\n\n path_to_folder = os.path.join(path_to_save_test, label)\n\n if not os.path.isdir(path_to_folder): # if the dir. not exist\n os.makedirs(path_to_folder) # create the directory\n\n img_full_path = os.path.join(path_to_images, img_name)\n\n print(\"Copying \", img_full_path, \" to \", path_to_folder)\n shutil.copy(img_full_path, path_to_folder)\n\n except:\n print(\"[INFO]: Error reading csv file.\")", "def stitch_images(self):\n stitched_folder_name = self.parent_folder + 'stitched'\n print(\"Stitching images in:\")\n print(self.folder_list)\n print(\"Storing in: \" + str(stitched_folder_name))\n\n try:\n print(\"Making dir \" + str(stitched_folder_name) + \" for stitching\")\n os.mkdir(stitched_folder_name)\n except OSError:\n print(\"Folder exists, have you already done this stitching??\")\n return\n\n photo_list = self.get_photo_list(self.parent_folder + '/' + self.folder_list[0])\n # get photo sizes\n print(self.parent_folder + '/' + self.folder_list[0] + '/' + photo_list[0])\n size_photo = cv2.imread(self.parent_folder + '/' + self.folder_list[0] +\n '/' + photo_list[0], cv2.IMREAD_ANYDEPTH)\n photo_height, photo_width = np.shape(size_photo)\n stitched_height = photo_height * 2\n stitched_width = photo_width * 4\n\n for photo in photo_list:\n stitched_photo = np.full((stitched_height, stitched_width), 0)\n\n for i, folder in enumerate(self.folder_list):\n print(i)\n print(folder)\n print(self.parent_folder + folder + '/' + photo)\n\n stitched_photo[(int((float(i) / 4.0)) * photo_height):(int(((float(i) / 4.0) + 1)) * photo_height),\n (int(i % 4) * photo_width):((int((i % 4) + 1)) * photo_width)] \\\n = cv2.imread(self.parent_folder + '/' + folder + '/' + photo, cv2.IMREAD_ANYDEPTH)\n\n stitched_photo = stitched_photo.astype(np.uint16)\n cv2.imwrite(stitched_folder_name + '/' + photo, stitched_photo, [cv2.IMWRITE_PNG_COMPRESSION, 0])\n\n return stitched_folder_name", "def test_save_image(self):\n\n from m3_save_images.m3_save_images import save_images\n folder_destination_name = \"unittest-sorted-images\"\n path_source = \"../img\"\n image_name = [\"00ff00.png\", \"aqua.png\", \"black.jpg\", \"yellow.png\", \"red2.jpg\", \"green.jpg\"]\n image_color = [\"Lime\", \"Aqua\", \"Black\", \"Yellow\", \"Red\", \"Green\"]\n # new empty folder is needed for testing save_image() function\n if os.path.isdir(folder_destination_name):\n shutil.rmtree(folder_destination_name)\n os.mkdir(folder_destination_name)\n # creating folders\n for i 
in range(0, 4):\n save_images(folder_destination_name, path_source, image_name[i], image_color[i])\n self.assertEqual(''.join(os.listdir(os.path.join(folder_destination_name, image_color[i]))), image_name[i])\n save_images(folder_destination_name, path_source, image_name[i], image_color[5])\n self.assertNotEqual(''.join(os.listdir(os.path.join(folder_destination_name, image_color[i]))), image_name[5])", "def exporting_cropped_images (fpath_tiff):\n src = rasterio.open(fpath_tiff, 'r')\n outfolder_irregular = '/train/irregular'\n outfolder_healthy = '/train/healthy'\n outfolder_concrete = '/train/concrete'\n outfolder_incomplete = '/train/incomplete'\n outfolder_other = '/train/other'\n outfolder = '/train/batch'\n #os.makedirs (outfolder, exist_ok = True)", "def copy_photos(source='.', dest='.'):\n if isdir(source): # if this is a directory,\n for child in listdir(source):\n # compose full path to child\n child = join(source, child)\n if isdir(child):\n copy_photos(child, dest)\n else:\n if guess_type(child)[0] is not None \\\n and (guess_type(child)[0].split('/')[0] == 'image'):\n new = rename(child)\n create_tree(new, dest)", "def change_imagens(current_folder, destination_folder, name=\"crosswalk\", qtd=0, dim=(128, 64)):\n\n img_path = [os.path.join(current_folder, file) for file in os.listdir(current_folder)]\n qtd_img = 1\n\n for img in img_path:\n img_name = os.path.split(img)[1].split(\"/\")[0]\n extension = os.path.split(img_name)[1].split(\".\")[0]\n\n new_name = name\n saved_name = new_name + \"_\" + str(qtd_img + qtd)\n print(img_name + \" -> \" + saved_name + \".jpg\")\n\n try:\n saved_folder = destination + \"/\"\n\n # carrega a imagem\n img = Image.open(current_folder + \"/\" + img_name)\n # converte a imagem (PIL) para numpy array\n imgNp = np.array(img,'uint8')\n # redimensionar a imagem\n imgNp = cv2.resize(imgNp, dim)\n\n # Cria a pasta positivas_final e salva as imagens\n pathlib.Path(saved_folder).mkdir(parents=True, exist_ok=True)\n cv2.imwrite(saved_folder + saved_name + \".jpg\", imgNp)\n\n qtd_img += 1\n\n except ValueError:\n print('.')", "def makeOutDirs(od):\n if args.format.lower() == 'kitti':\n id = \"%s/images\" % od\n ld = \"%s/labels\" % od\n elif args.format.lower() == 'darknet':\n id = \"%s/images\" % od\n ld = \"%s/annotations\" % od\n else:\n print \"Invalid output format %s!\" % args.format\n usage()\n ensureDir(id)\n ensureDir(ld)\n return id, ld", "def split_data(src, dst, ratio=0.2):\n dirs = [f for f in os.listdir(src) if os.path.isdir(os.path.join(src, f)) and not f.startswith('.')]\n for d in dirs:\n src_subdir = os.path.join(src, d)\n dst_subdir = os.path.join(dst, d)\n if not os.path.exists(dst_subdir):\n os.makedirs(dst_subdir)\n imgs = [f for f in os.listdir(src_subdir) if os.path.isfile(os.path.join(src_subdir, f)) and not f.startswith('.')]\n for img in imgs:\n if np.random.uniform() <= ratio:\n move(os.path.join(src_subdir, img), dst_subdir)\n print('split done')", "def test_build_export_path_image(clpipe_fmriprep_dir: Path):\n\n # Build the fMRIPrep input image path\n image_name = \"sub-0_task-rest_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz\"\n fmriprep_dir = clpipe_fmriprep_dir / \"data_fmriprep\" / \"fmriprep\"\n image_path = fmriprep_dir / \"sub-0\" / \"func\" / image_name\n\n # Build the output path\n subject_out_dir = clpipe_fmriprep_dir / \"data_postproc2\" / \"sub-0\"\n\n # Build full export path\n export_path = build_export_path(image_path, \"0\", fmriprep_dir, subject_out_dir)\n\n assert str(export_path) == str(\n 
subject_out_dir\n / \"func\"\n / \"sub-0_task-rest_space-MNI152NLin2009cAsym_desc-postproc_bold.nii.gz\"\n )", "def cat_files(files, output):\n for file in files:\n with open(file, 'r') as fd:\n shutil.copyfileobj(fd, output)", "def cat_files(files, output):\n for file in files:\n with open(file, 'r') as fd:\n shutil.copyfileobj(fd, output)", "def extract_write_to_file(self, num_extracted, write_dir, sub_h, sub_w, margin=10):\n\n file_seed = len(os.listdir(write_dir))\n\n for i in range(num_extracted):\n file_num = str(file_seed + i)\n write_path = os.path.join(write_dir, file_num + \".\" + 'jpg')\n\n print('extracting {}/{} images of dimension {}x{}'.format(i, num_extracted, sub_h, sub_w))\n print('writting to location: {}'.format(write_path))\n\n self.extract_single(sub_h, sub_w, write_path)", "def save_images(images, save_dir, image_type):\n for image in images:\n raw_img = urllib2.urlopen(image).read()\n count = len([i for i in os.listdir(save_dir) if image_type in i]) + 1\n f = open(save_dir + '/' + image_type + '_' + str(count), 'wb')\n f.write(raw_img)\n f.close()", "def path_to_name(img):\n\n return os.path.dirname(img) + '_' + os.path.basename(img)", "def convert_all(input: str, out: str):\n dateien = listdir(input)\n for datei in dateien:\n out_datei = datei.replace(\" \", \"_\") # Leertasten durch Unterstriche ersetzen\n convert_image(input + datei, out + out_datei)", "def write_img_to_fs(name, data):\n with open(name, \"wb\") as fout:\n fout.write(data)", "def normalize_path(img_file):\n\n img_file = img_file.split('/')[-1]\n img_file = 'data/IMG/'+img_file.split('\\\\')[-1]\n return img_file", "def crop_and_save_images(source_directory, target_directory):\n haar_cascade = cv.CascadeClassifier('haar_face.xml')\n os.chdir(target_directory)\n file_names = os.listdir(source_directory)\n number_of_images = len(file_names)\n total_number_of_digits = math.floor(math.log10(number_of_images)) + 1\n i = 1\n for filename in file_names:\n image = os.path.join(source_directory, filename)\n print(image)\n cropped_image = crop_face_image(image, haar_cascade)\n try:\n if cropped_image.any() is None:\n print('No face detected, skipping image')\n continue\n except AttributeError:\n print('No face detected, skipping image')\n continue\n current_number_of_digits = math.floor(math.log10(i)) + 1\n number_of_zeros = total_number_of_digits - current_number_of_digits\n print(f'Cropping: {filename}, saving to folder {target_directory}')\n newFilename = 'IMG_' + '0'*number_of_zeros + f'{i}.jpg'\n cv.imwrite(newFilename, cropped_image)\n i += 1", "def saveImgs(img, filename=None):\n\tif filename is None:\n\t\tdate = time.strftime(\"%Y%m%d\")\n\t\tfilename = \"T\" + str(date)\n\t\tjpg = \".jpg\"\n\t\tcount = 0\n\t\tfor item in img:\n\t\t\tname = filename + str(count) + jpg\n\t\t\tcv2.imwrite(name, item)\n\t\t\tcount += 1\n\telse:\n\t\tfor i in range(0, len(img)):\n\t\t\tcv2.imwrite(filename[i], img[i])" ]
[ "0.6020291", "0.58817405", "0.5803224", "0.57723814", "0.57670635", "0.57295036", "0.5712866", "0.56850314", "0.56731784", "0.5619671", "0.55809706", "0.5572275", "0.5563066", "0.55071867", "0.55031073", "0.538915", "0.5343728", "0.5336606", "0.5318499", "0.53176695", "0.5316611", "0.53027153", "0.5299702", "0.5276676", "0.5275632", "0.5243651", "0.52414584", "0.5191616", "0.51908463", "0.5183552", "0.51718485", "0.5171376", "0.51673186", "0.5166163", "0.5166163", "0.5164798", "0.5159895", "0.51577723", "0.51466894", "0.51191026", "0.5105412", "0.51012903", "0.5097146", "0.5097039", "0.5095463", "0.5087037", "0.5083879", "0.5081339", "0.5076056", "0.5074391", "0.5053108", "0.50462604", "0.50447124", "0.50446165", "0.5043029", "0.5039403", "0.50290424", "0.5024794", "0.50211895", "0.5019482", "0.5016269", "0.5014021", "0.50109106", "0.50098157", "0.50001466", "0.49981922", "0.49936116", "0.49918625", "0.49913254", "0.4987713", "0.49828133", "0.49737862", "0.4973755", "0.4972991", "0.49690774", "0.4958239", "0.49553755", "0.49470916", "0.49444833", "0.49444512", "0.49387145", "0.4938554", "0.49325183", "0.49312603", "0.49308988", "0.4929141", "0.49257082", "0.49140707", "0.4906089", "0.48970327", "0.4894203", "0.48938707", "0.48938707", "0.48928517", "0.4882542", "0.4881907", "0.48768255", "0.48760873", "0.48702335", "0.4868665", "0.4865804" ]
0.0
-1
return True if images read on file are identical, False otherwise
def compare_image_buffers(imgbuf1, imgbuf2): with io.BytesIO(imgbuf1) as imgio1, io.BytesIO(imgbuf2) as imgio2: img1 = Image.open(imgio1) img2 = Image.open(imgio2) diff = ImageChops.difference(img1, img2) return not diff.getbbox()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __compareImage(self, file1, file2):\n # arg=self.__validateString(str_arg)\n # file1, file2=arg.split(' ', 1)\n try:\n img1 = Image.open(file1)\n img2 = Image.open(file2)\n if img1.size != img2.size:\n return False\n by1 = img1.tobytes()\n by2 = img2.tobytes()\n # format r,g,b,255,r,g,b,255, 3 bytes = 1 point, 255=separator, total 4 bytes\n l = len(by1) / 4\n # total points and same points\n tp = 0\n sp = 0\n for j in range(l):\n i = j * 4\n tp += 1\n if by1[i] == by2[i] and by1[i + 1] == by2[i + 1] and by1[i + 2] == by2[i + 2]:\n sp += 1\n # max to 2% diff allowed\n if tp * 0.98 > sp:\n return False\n else:\n return True\n except Exception, e:\n printLog(self.threadName + \"Exception in __compareImage: %s\" % e.message, logging.ERROR)\n traceback.print_exc()\n return False\n finally:\n img1 = None\n img2 = None", "def image_comparison(self):\n for result in self.cards:\n if result.image_status:\n return True\n return False", "def are_files_equal(file1, file2):\n input_file_1 = open(file1, \"r\")\n input_file_2 = open(file2, \"r\")\n\n file1 = input_file_1.read()\n file2 = input_file_2.read()\n print(type(file1), file1, type(file2), file2)\n\n result =False\n if file1 == file1:\n result = True\n\n input_file_1.close()\n input_file_2.close()\n return result", "def images_are_present(file_info):\n currentdir = os.path.join(WORKDIR, file_info['folder'])\n if not os.path.exists(currentdir):\n return False\n count = len([x for x in os.listdir(currentdir) if x.endswith('.png')])\n if count != file_info['size']:\n print([x for x in os.listdir(currentdir) if x.endswith('.png')])\n print('Count does not match')\n print(count)\n print(file_info['size'])\n return False\n return True", "def is_new_based_on_imgs(soup):\n\n \n \n prev_hashes = get_prev_img_hashes()\n temp_hashes = get_temp_img_hashes(soup)\n\n if len(temp_hashes.difference(prev_hashes))>0:\n print(\"new, based on images\")\n return True\n else:\n return False", "def compare_images(img1_path, img2_path):\n img1 = Image.open(img1_path)\n img2 = Image.open(img2_path)\n try:\n diff = ImageChops.difference(img1, img2)\n except ValueError:\n return False\n return diff.getbbox() is None", "def file_present(self,imagefile=None):\n import hashlib\n if self.filesize()==0:\n return False # empty files are never present\n if imagefile==None:\n imagefile=self.imagefile # use this one\n for hashname in ['md5','sha1']:\n oldhash = self.tag(hashname)\n if oldhash:\n newhash = hashlib.new(hashname,self.contents(imagefile=imagefile)).hexdigest()\n return oldhash==newhash\n raise ValueError,\"Cannot process file \"+self.filename()+\": no hash in \"+str(self)", "def _check_consistency_between_imaging_extractors(self):\n return True", "def equal(self, file1, file2):\n\n if file1.size != file2.size:\n return False\n\n # Compare stat\n if self.use_stat and not self._equal_stat(file1, file2):\n return False\n\n # Compare times\n if self.use_times and not self._equal_times(file1, file2):\n return False\n\n # Compare attributes\n if self.use_attributes and not self._equal_attributes(file1, file2):\n return False\n\n # TODO: Optionally diff hashes\n\n return True", "def check_images():\n\n print(f'Looking for duplicate images...')\n\n for image in images_in_directory:\n duplicate = check_image_for_duplicates(image)\n\n if (duplicate):\n print(f'Found {duplicate} to be a duplicate image of: {image}')\n remove_image(duplicate)\n pass", "def check_image_for_duplicates(original_image):\n\n original_image_hash = get_average_hash(original_image)\n\n 
print(f'Checking for duplicate images for {original_image}')\n\n for potential_duplicate_image in images_in_directory:\n potential_duplicate_image_hash = get_average_hash(\n potential_duplicate_image)\n\n if ((original_image != potential_duplicate_image) & compare_image_hashes(original_image_hash, potential_duplicate_image_hash)):\n return potential_duplicate_image\n\n pass", "def check_duplicate(fp1, fp2):\n try:\n subprocess.check_output(['diff', fp1, fp2])\n return True\n except subprocess.CalledProcessError:\n return False", "def check_md5(file1, file2):\r\n with open(file1, \"rb\") as f1:\r\n h1 = hashlib.md5(f1.read()).digest()\r\n with open(file2, \"rb\") as f2:\r\n h2 = hashlib.md5(f2.read()).digest()\r\n return h1 == h2", "def _compare_file(path1, path2):\n\n try:\n return _open_file(path1) == _open_file(path2)\n except OSError:\n return False", "def assert_image_equal(path1, path2):\n test_im = np.asarray(Image.open(path1))\n ref_im = np.asarray(Image.open(path2))\n npt.assert_array_equal(test_im, ref_im)", "def are_compatible_imgs(one_img, another_img):\n return have_same_shapes(one_img, another_img)", "def images_exist(self):\n pass", "def compare(file1, file2):\n\tfrom os.path import exists\n\tresult = False\n\t\n\tfile1 = adaptPath(file1)\n\tfile2 = adaptPath(file2)\n\t\n\t# If two files existing\n\tif exists(file1) and exists(file2):\n\t\t# If the date and size equal\n\t\tif getFileSize(file1) == getFileSize(file2):\n\t\t\ttry:\n\t\t\t\t# Read the content of first file\n\t\t\t\tcontent1 = open(file1, \"rb\").read()\n\t\t\t\ttry:\n\t\t\t\t\t# Read the content of second file\n\t\t\t\t\tcontent2 = open(file2, \"rb\").read()\n\t\t\t\t\t# If content differs\n\t\t\t\t\tif content1 == content2:\n\t\t\t\t\t\tresult = True\n\t\t\t\texcept IOError:\n\t\t\t\t\tpass\n\t\t\texcept IOError:\n\t\t\t\tpass\n\treturn result", "def compare_files(fp1, fp2):\n\n line1 = fp1.readline()\n line2 = fp2.readline()\n\n while line1 and line2:\n if line1.startswith('#') and line2.startswith('#'):\n pass\n elif not line1 == line2:\n return False\n \n line1 = fp1.readline()\n line2 = fp2.readline()\n\n if line1 or line2:\n return False\n\n return True", "def test_image_rw(self):\n from ..image import Image\n from ..io.image import read_image, write_image\n shape = (5,5)\n pix = np.random.uniform(size=shape)\n ivar = np.random.uniform(size=shape)\n mask = np.random.randint(0, 3, size=shape)\n img1 = Image(pix, ivar, mask, readnoise=1.0, camera='b0')\n write_image(self.testfile, img1)\n img2 = read_image(self.testfile)\n\n #- Check output datatypes\n self.assertEqual(img2.pix.dtype, np.float64)\n self.assertEqual(img2.ivar.dtype, np.float64)\n self.assertEqual(img2.mask.dtype, np.uint32)\n\n #- Rounding from keeping np.float32 on disk means they aren't equal\n self.assertFalse(np.all(img1.pix == img2.pix))\n self.assertFalse(np.all(img1.ivar == img2.ivar))\n\n #- But they should be close, and identical after float64->float32\n self.assertTrue(np.allclose(img1.pix, img2.pix))\n self.assertTrue(np.all(img1.pix.astype(np.float32) == img2.pix))\n self.assertTrue(np.allclose(img1.ivar, img2.ivar))\n self.assertTrue(np.all(img1.ivar.astype(np.float32) == img2.ivar))\n\n #- masks should agree\n self.assertTrue(np.all(img1.mask == img2.mask))\n self.assertEqual(img1.readnoise, img2.readnoise)\n self.assertEqual(img1.camera, img2.camera)\n self.assertEqual(img2.mask.dtype, np.uint32)\n\n #- should work with various kinds of metadata header input\n meta = dict(BLAT='foo', BAR='quat', BIZ=1.0)\n img1 = Image(pix, 
ivar, mask, readnoise=1.0, camera='b0', meta=meta)\n write_image(self.testfile, img1)\n img2 = read_image(self.testfile)\n for key in meta:\n self.assertEqual(meta[key], img2.meta[key], 'meta[{}] not propagated'.format(key))\n\n #- img2 has meta as a FITS header instead of a dictionary;\n #- confirm that works too\n write_image(self.testfile, img2)\n img3 = read_image(self.testfile)\n for key in meta:\n self.assertEqual(meta[key], img3.meta[key], 'meta[{}] not propagated'.format(key))", "def is_RGB(self,img_path):\n image=Image.open(img_path)\n image=np.asarray(image)\n if(len(image.shape)<3):\n return False\n return True", "def has_images(self):\n return len(self.images) > 0", "def samefile(path1, path2):\n try:\n return os.path.samefile(path1, path2)\n except OSError as err:\n if err.errno == 2: # ENOENT\n return False\n else:\n raise", "def check_duplicates(files):\n # If we don't have two files, it's kind of meaningless. True or False\n # would be fine.\n if len(files) < 2:\n return True\n file1 = files[0]\n # Compare every file to the first file. If they're all the same, they're\n # all duplicates. If any one of them isn't, they're not all duplicates\n # so return False.\n for f in files[1:]:\n if not no_diff(file1, f):\n return False\n return True", "def compare_group_images(directory, show_imgs=True, similarity=\"high\", compression=100):\n # list where the found duplicate/similar images are stored\n duplicates = []\n lower_res = []\n\n imgs_matrix = create_imgs_matrix(directory, compression)\n\n # search for similar images\n if similarity == \"low\":\n ref = 13000\n # search for 1:1 duplicate images\n else:\n ref = 21000\n\n main_img = 0\n compared_img = 1\n nrows, ncols = compression, compression\n srow_A = 0\n erow_A = compression\n srow_B = erow_A\n erow_B = srow_B + compression\n\n while erow_B <= imgs_matrix.shape[0]:\n while compared_img < (len(image_files)):\n # select two images from imgs_matrix\n imgA = imgs_matrix[srow_A: erow_A, # rows\n 0: ncols] # columns\n imgB = imgs_matrix[srow_B: erow_B, # rows\n 0: ncols] # columns\n # compare the images\n rotations = 0\n while image_files[main_img] not in duplicates and rotations <= 3:\n if rotations != 0:\n imgB = rotate_img(imgB)\n err = mse(imgA, imgB)\n print ( \"err:\", err)\n if err <= ref:\n if show_imgs == True:\n show_file_info(compared_img, main_img)\n add_to_list(image_files[main_img], duplicates)\n check_img_quality(directory, image_files[main_img], image_files[compared_img], lower_res)\n rotations += 1\n srow_B += compression\n erow_B += compression\n compared_img += 1\n\n srow_A += compression\n erow_A += compression\n srow_B = erow_A\n erow_B = srow_B + compression\n main_img += 1\n compared_img = main_img + 1\n\n msg = \"\\n***\\n DONE: found \" + str(len(duplicates)) + \" duplicate image pairs in \" + str(\n len(image_files)) + \" total images.\\n The following files have lower resolution:\"\n print(msg)\n return set(lower_res)", "def verify(image_path):\n try:\n with Image.open(image_path) as img:\n img.verify()\n return True\n except Exception as e:\n log.warn('Path [{}] does not point to an image: [{}]'.format(image_path, e))\n return False", "def check_if_original(article):\n num_img = len(article.find_all(\"img\"))\n return num_img < 2", "def compare_contents(lhs, rhs):\n for filename in (lhs, rhs):\n if not os.path.exists(filename):\n return False\n\n with open(lhs, \"r\") as lhs_file, open(rhs, \"r\") as rhs_file:\n return lhs_file.read() == rhs_file.read()", "def load_from_images(self):\n 
logging.debug(\"load_from_images called\")\n return True", "def check_duplicate_image_name(image_paths):\n image_names = [os.path.basename(os.path.splitext(p)[0]) for p in image_paths]\n\n num_images = len(image_names)\n\n num_unique = len(set(image_names))\n\n if num_images != num_unique:\n raise ValueError('Found %d duplicate images.' % (num_images - num_unique))\n\n logging.info('Found no duplicates in %d images.', num_images)", "def verifyShiftFile(self):\n if self['refimage'] and fu.findFile(self['refimage']):\n return True\n else: return False", "def verifyFile(source, destination):\n\tsourceHash = hashlib.sha256(open(source, 'rb').read()).digest()\n\tdestinationHash = hashlib.sha256(open(destination, 'rb').read()).digest()\n\n\tif sourceHash == destinationHash:\n\t\treturn (True, str(sourceHash))\n\n\treturn False", "def img_compare(file1, file2):\n # read image\n img1 = Image.open(file1)\n img2 = Image.open(file2)\n\n # resize \n size = 128, 128\n img1_res = img_resize(img1, size)\n img2_res = img_resize(img2, size)\n\n img1_res.save(\"img_1.thumbnail\", \"JPEG\")\n img2_res.save(\"img_2.thumbnail\", \"JPEG\")\n\n # convert to gray scale\n img1_grayscale = img1_res.convert('LA')\n img1_grayscale.save(\"img_1_grayscale.png\")\n\n img2_grayscale = img2_res.convert('LA')\n img2_grayscale.save(\"img_2_grayscale.png\")\n\n # normalise\n img1_norm = normalize(np.array(img1_grayscale.getdata()).astype(float))\n img2_norm = normalize(np.array(img2_grayscale.getdata()).astype(float))\n\n try:\n # compare two images\n diff = img1_norm - img2_norm\n m_norm = sum(abs(diff)) # Manhattan norm\n z_norm = norm(diff.ravel(), 0) # Zero norm\n\n # print(\"Manhattan norm:\", m_norm, \"/ per pixel:\", m_norm/img1_norm.size)\n # print(\"Zero norm:\", z_norm, \"/ per pixel:\", z_norm*1.0/img1_norm.size)\n\n return m_norm/img1_norm.size, float(z_norm) / img1_norm.size\n except:\n return 100, 100", "def equal_file_sum(file1_paht, file2_paht):\n md5_sum1 = generate_sum(file1_path)\n md5_sum2 = generate_sum(file2_path)\n return (md5_sum1 == md5_sum2)", "def _is_tracked(filename, metadata):\n current_local_sha = local_metadata.get(filename, None)\n current_remote_sha = metadata.get(filename, None)\n return current_local_sha is not None \\\n and current_remote_sha is not None \\\n and current_local_sha == current_remote_sha", "def check_type(filename):\n try:\n im = Image.read(filename)\n except SanperaError:\n return False\n else:\n return im.original_format in [b'JPEG', b'PNG', b'GIF']", "def is_new_red_camera():\r\n ids = range(15)\r\n for id in ids:\r\n name = 'red{:04d}.fits'.format(id)\r\n if os.path.exists(name):\r\n hdr = pyfits.getheader(name)\r\n if hdr['NAXIS1'] == 4141 or hdr['NAXIS1'] == 4114:\r\n return True\r\n elif hdr['NAXIS1'] == 1024 or hdr['NAXIS1'] == 1124:\r\n return False\r\n else:\r\n raise ValueError('Unexpected image size')\r\n else:\r\n continue\r\n\r\n # raise ValueError('Could not locate red side files')\r\n print 'Could not locate red side files--defaulting to new camera'\r\n return True", "def is_newccd(filename):\n fh = fits.open(filename)\n out = False\n if len(fh) == 4:\n out = True\n\n fh.close()\n return out", "def _fileinfo_has_changed(self, metadata_filename, new_fileinfo):\n \n # If there is no fileinfo currently stored for 'metadata_filename',\n # try to load the file, calculate the fileinfo, and store it.\n if metadata_filename not in self.fileinfo:\n self._update_fileinfo(metadata_filename)\n\n # Return true if there is no fileinfo for 'metadata_filename'.\n # 
'metadata_filename' is not in the 'self.fileinfo' store\n # and it doesn't exist in the 'current' metadata location.\n if self.fileinfo.get(metadata_filename) is None:\n return True\n\n current_fileinfo = self.fileinfo[metadata_filename]\n\n if current_fileinfo['length'] != new_fileinfo['length']:\n return True\n\n # Now compare hashes. Note that the reason we can't just do a simple\n # equality check on the fileinfo dicts is that we want to support the\n # case where the hash algorithms listed in the metadata have changed\n # without having that result in considering all files as needing to be\n # updated, or not all hash algorithms listed can be calculated on the\n # specific client.\n for algorithm, hash_value in new_fileinfo['hashes'].items():\n # We're only looking for a single match. This isn't a security\n # check, we just want to prevent unnecessary downloads.\n if hash_value == current_fileinfo['hashes'][algorithm]:\n return False\n\n return True", "def hasImages(self):\n return len(self.getImages()) > 0", "def hasImages(self):\n return len(self.getImages()) > 0", "def _compare_files(self, first_file, second_file):\n\n self.log.info('-' * 80)\n self.log.info('Compare files')\n\n code, out = cmd_exec(['cmp', str(first_file), str(second_file)], shell=False, log=self.log)\n if code:\n self.log.warning('md5 checksum IS NOT SAME with ffmpeg sw decode')\n self.log.warning(out)\n return False\n\n self.log.info('md5 checksum IS SAME with ffmpeg sw decode')\n return True", "def is_jpegxl_recompressed_jpeg_file(filename):\n try:\n with open(filename, 'rb') as h:\n header = h.read(len(JPEGXL_RECOMPRESSED_JPEG_HEADER))\n # Cf. https://arxiv.org/pdf/1908.03565.pdf, section 9.1,\n # on recompressed-JPEG header.\n return header == JPEGXL_RECOMPRESSED_JPEG_HEADER\n except: # pylint:disable=bare-except\n # If anything failed, this means that we cannot establish that the file\n # has the expected header, so we return False.\n return False", "def cmp(f1, f2):\n with open(f1) as f1, open(f2) as f2:\n return f1.read() == f2.read()", "def test_duplicate_images_error(self):\n with self.assertRaises(AssertionError):\n disk.merge_datasets(self.input_datasets, self.output_dataset)\n\n # Original dataset shouldn't be modified.\n self.assertEqual(0, len(self.output_dataset.metadata()))", "def equals(self, image: 'BaseImage') -> bool:\n assert isinstance(image, BaseImage)\n im1 = pygame.image.tostring(self._surface, 'RGBA')\n im2 = pygame.image.tostring(image._surface, 'RGBA')\n return im1 == im2", "def number_of_images_valid():\r\n if number_of_images_a_valid() and number_of_images_b_valid():\r\n return True\r\n else:\r\n return False", "def check_image(df, fname_col, img_dir):\n\n\tfor filename in df[fname_col].values[0:4]:\n\n\t\tif not os.path.isfile(img_dir+filename):\n\t\t\tlogger.error(\"path {} does not exit\".format(img_dir+filename))\n\t\t\tsuccess = False\n\t\telse:\n\t\t\ttry:\n\t\t\t\timg = mpimg.imread(img_dir + filename)\n\t\t\t\tsuccess = True\n\t\t\texcept OSError:\n\t\t\t\tsuccess = False\n\t\t\t\tlogger.error(\"image is {} corrupted/missing\".\n\t\t\t\t\t\t\t\t\t\t\t\t\tformat(filename))\n\t\t\t\t\n\treturn success", "def check_files(self):\n for f in self.filenames:\n img = cv2.imread(f, int(self.color))\n if img is None:\n os.remove(f)", "def same_file(wavecar1, wavecar2, wavecar3):\n same = False\n if (filecmp.cmp(wavecar1, wavecar2, shallow=False)):\n print(\"Serious problem:: {} and {} are the same\".format(wavecar1, wavecar2))\n same = True\n if (filecmp.cmp(wavecar1, wavecar3, 
shallow=False)):\n print(\"Serious problem:: {} and {} are the same\".format(wavecar1, wavecar3))\n same = True\n if (filecmp.cmp(wavecar2, wavecar3, shallow=False)):\n print(\"Serious problem:: {} and {} are the same\".format(wavecar2, wavecar3))\n same = True\n\n if same:\n print(\"It seems that you are using same files to do finite difference, exit\")\n print(\"\\tComment the 'same_file' checker if you know what you are doing\")\n raise SystemExit", "def _compare_jpg_decode_with_pil(test_case, images, print_debug_info=False):\n of_decoded_images = _of_image_decode(images)\n pil_images = [Image.open(image) for image in images]\n # convert image to BGR\n pil_decoded_images = [np.array(image)[:, :, ::-1] for image in pil_images]\n\n for of_decoded_image, pil_decoded_image in zip(\n of_decoded_images, pil_decoded_images\n ):\n of_decoded_image = of_decoded_image.squeeze()\n test_case.assertTrue(len(of_decoded_image.shape) == 3)\n test_case.assertTrue(len(pil_decoded_image.shape) == 3)\n\n diff = of_decoded_image - pil_decoded_image\n diff_index = np.where(diff != 0)\n diff_abs_values = diff[diff_index]\n\n if print_debug_info:\n print(\"of_decoded_image:\\n\", of_decoded_image, of_decoded_image.shape)\n print(\"pil_decoded_image:\\n\", pil_decoded_image, pil_decoded_image.shape)\n print(\"diff_index:\\n\", diff_index)\n print(\"diff_abs_values:\\n\", diff_abs_values)\n print(\n \"of_decoded_image diff:\\n\",\n of_decoded_image[diff_index[0], diff_index[1]],\n )\n print(\n \"pil_decoded_image diff:\\n\",\n pil_decoded_image[diff_index[0], diff_index[1]],\n )\n\n # only green channel has difference of 1\n test_case.assertTrue(np.all(diff_index[-1] == 1))\n test_case.assertTrue(np.all(diff_abs_values == 1))", "def check_md5sum(file1: str, file2: str) -> bool:\n return get_md5_hash(file1) == get_md5_hash(file2)", "def checkImageDimensions(self, filenames):\n\t\ts = None\n\t\thashStr = filenames[:]\n\t\thashStr.sort()\n\t\thashStr = str(hashStr)\n\t\t# check to see if there's already a result of the check for these filenames in the cache\n\t\tif hashStr in self.dimensionCheck:\n\t\t\tLogging.info(\"Using cached result for dimensions check: %s\"%(str(self.dimensionCheck[hashStr])))\n\t\t\treturn self.dimensionCheck[hashStr]\n\t\t\t\n\t\tfor file in filenames:\n\t\t\tif file not in self.imageDims:\n\t\t\t\tprint \"Trying to open\",type(file)\n\t\t\t\ttry:\n\t\t\t\t\tself.ext = file.split(\".\")[-1].upper()\n\t\t\t\t\tif self.ext == \"TIF\":\n\t\t\t\t\t\tself.ext = \"TIFF\"\n\t\t\t\t\tif self.ext == \"JPG\":\n\t\t\t\t\t\tself.ext = \"JPEG\"\n\n\t\t\t\t\tif self.ext == \"VTI\":\n\t\t\t\t\t\treader = vtk.vtkXMLImageReader()\n\t\t\t\t\telse:\n\t\t\t\t\t\treader = eval(\"vtk.vtk%sReader()\"%self.ext)\n\t\t\t\t\treader.SetFileName(file)\n\t\t\t\t\treader.UpdateInformation()\n\t\t\t\texcept IOError, ex:\n\t\t\t\t\ttraceback.print_exc()\n\t\t\t\t\traise Logging.GUIError(\"Cannot open image file\", \"Cannot open image file %s\" % file)\n\n\t\t\t\textent = reader.GetDataExtent()\n\t\t\t\tfSize = (extent[1],extent[3])\n\t\t\t\tself.imageDims[file] = fSize\n\t\t\telse:\n\t\t\t\tfSize = self.imageDims[file]\n\t\t\tif s and fSize != s:\n\t\t\t\tx0, y0 = s\n\t\t\t\tx1, y1 = fSize\n\t\t\t\tself.dimensionCheck[hashStr] = False\n\t\t\t\treturn 0\n\t\t\ts = fSize \n\t\t\tfn = file\n\t\tself.dimensionCheck[hashStr] = True\n\t\treturn 1", "def check_files(self):\n print('checking files')\n for f in self.filenames:\n img = cv2.imread(f, int(self.color))\n if img is None:\n os.remove(f)", "def is_different(image1, 
image2):\n gray1 = cv2.cvtColor(image1, cv2.COLOR_RGB2GRAY)\n gray2 = cv2.cvtColor(image2, cv2.COLOR_RGB2GRAY)\n\n (score, diff) = compare_ssim(gray1, gray2, full=True)\n diff = (diff * 255).astype(\"uint8\")\n\n thresh = cv2.threshold(diff, 0, 255,\n cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\n cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n\n return bool(cnts)", "def consistent(self):\n return self.incore_digest == self.ondisk_digest", "def compare(actor, frame):\n urlretrieve(actor, 'actor.jpg')\n\n with open(\"frame_image.jpg\", 'wb') as frame_image:\n frame_image.write(base64.b64decode(frame[23:]))\n\n actor_encoding = face_encodings(load_image_file('actor.jpg'))[0]\n frame_encoding = face_encodings(load_image_file('frame_image.jpg'))\n\n os.remove('actor.jpg')\n os.remove('frame_image.jpg')\n for encoding in frame_encoding:\n if compare_faces([actor_encoding], encoding):\n return True\n return False", "def test_read(self):\n for line in TESTIMAGES.split('\\n'):\n vals = line.strip().split()\n name = vals[0]\n logger.debug(\"Testing file %s\" % name)\n dim1, dim2 = [int(x) for x in vals[1:3]]\n mini, maxi, mean, stddev = [float(x) for x in vals[3:]]\n obj = raxisimage()\n obj.read(os.path.join(os.path.dirname(self.mar), name))\n\n self.assertAlmostEqual(mini, obj.getmin(), 2, \"getmin [%s,%s]\" % (mini, obj.getmin()))\n self.assertAlmostEqual(maxi, obj.getmax(), 2, \"getmax [%s,%s]\" % (maxi, obj.getmax()))\n self.assertAlmostEqual(mean, obj.getmean(), 2, \"getmean [%s,%s]\" % (mean, obj.getmean()))\n self.assertAlmostEqual(stddev, obj.getstddev(), 2, \"getstddev [%s,%s]\" % (stddev, obj.getstddev()))\n self.assertEqual(dim1, obj.dim1, \"dim1\")\n self.assertEqual(dim2, obj.dim2, \"dim2\")\n self.assertNotEqual(obj.dim1, obj.dim2, \"dim2!=dim1\")", "def check_fileName(session) -> 'bool':\n c = get_client()\n cursor = c.find({},{\"size\":1, \"_id\":0})\n print(session)\n for document in cursor:\n print(document)\n if hmac.compare_digest(session, document[\"size\"]):\n return True\n print(\"size \", document[\"size\"])\n return False", "def hasImg(img_name):\n try:\n Image.objects.raw({\"_id\": img_name}).first()\n return True\n except pymodm_errors.DoesNotExist:\n return False", "def assert_data_correct(self) -> bool:\n if not self.training_folder.exists():\n return False\n # 27: number of characters\n # 27*2: 27 original font characters and 27 folders with morphed version\n if len(list(self.training_folder.iterdir())) not in [27, 27 * 2]:\n return False\n # assert that each character folder has the expected number of images inside\n # expected number is repetitions + original, or just original if no morphing\n # took place\n for directory in self.training_folder.iterdir():\n img_count = len(list(directory.iterdir()))\n if img_count != self.repetitions + 1 and img_count != 1:\n return False\n return True", "def check_files(self):\n print('checking files')\n for f in tqdm(self.filenames):\n img = cv2.imread(f, int(self.color))\n if img is None:\n os.remove(f)", "def hasImage(self):\n if self.getImage():\n return True\n return False", "def _check_images_and_labels(self, image_dir, label_dir):\n return len(os.listdir(image_dir))==len(os.listdir(label_dir))", "def test_is_image(self):\n os.chdir(\"testimages/\")\n self.assertTrue(fileactions.is_image(\"arch_001.jpg\"))\n self.assertFalse(fileactions.is_image(\"not_an_image.jpg\"))", "def check(self, grain=50):\n opengles.glReadPixels(0, 0, self.ix, 
self.iy,\n GL_RGB, GL_UNSIGNED_BYTE,\n ctypes.byref(self.img))\n r0 = self.img[0:3]\n step = 3 * int(self.ix * self.iy / 50)\n for i in xrange(0, len(self.img)-3, step):\n if self.img[i:(i+3)] != r0:\n return True\n\n return False", "def file_should_be_processed(self, filepath):\n try:\n image.load(filepath)\n return True\n except:\n return False", "def assert_data_fragments_correct(self) -> bool:\n read_path = Path(os.environ[\"DATA_PATH\"]) / \"fragments\"\n if not read_path.exists():\n return False\n bin_images = [img for img in read_path.iterdir() if \"binarized\" in img.name]\n if len(bin_images) == 0:\n return False\n return True", "def __compare_files(self, filename1, filename2):\n self.assertTrue(os.path.isfile(filename1))\n self.assertTrue(os.path.isfile(filename2))\n self.assertEqual(os.path.getsize(filename1), os.path.getsize(filename2))\n with open(filename1, \"rb\") as f1:\n with open(filename2, \"rb\") as f2:\n n_blocks = int(self.args.size) // self.max_block_size\n for i in range(n_blocks):\n self.assertEqual(f1.read(self.max_block_size), \\\n f2.read(self.max_block_size))\n remaining = int(self.args.size) % self.max_block_size\n if remaining > 0:\n self.assertEqual(f1.read(remaining), \\\n f2.read(remaining))", "def _check_flip(origin_imgs, result_imgs):\n h, w, c = origin_imgs.shape\n for i in range(h):\n for j in range(w):\n for k in range(c):\n if result_imgs[i, j, k] != origin_imgs[i, w - 1 - j, k]:\n return False\n return True", "def isopen(self):\n return _image.image_isopen(self)", "def test_sanity_ati1():\n\n with Image.open(TEST_FILE_ATI1) as im:\n im.load()\n\n assert im.format == \"DDS\"\n assert im.mode == \"L\"\n assert im.size == (64, 64)\n\n assert_image_equal_tofile(im, TEST_FILE_ATI1.replace(\".dds\", \".png\"))", "def __is_image_id( self, image_id ):\n images_ids = self.__get_multi_images_ids()\n for id in images_ids:\n if image_id == id:\n return True\n return False", "def check_overwrite(self, filename, workspace):\n if not self.overwrite.value and os.path.isfile(filename):\n try:\n return (\n workspace.interaction_request(\n self, workspace.measurements.image_set_number, filename\n )\n == \"Yes\"\n )\n except workspace.NoInteractionException:\n raise ValueError(\n 'SaveImages: trying to overwrite %s in headless mode, but Overwrite files is set to \"No\"'\n % (filename)\n )\n return True", "def assert_img_equal(request):\n\n testname = request.node.name\n filename = Path(request.module.__file__)\n test_dir = filename.parent / filename.stem\n test_dir.mkdir(exist_ok=True)\n\n def _img_equal(img, index=0):\n expected_file = test_dir / f\"{testname}_{index}.png\"\n actual_file = test_dir / f\"{testname}_{index}_actual.png\"\n if img.ndim == 2:\n cv2.imwrite(str(actual_file), img)\n else:\n img_bgr = img.copy()\n img_bgr[..., :3] = img_bgr[..., :3][..., ::-1]\n cv2.imwrite(str(actual_file), img_bgr) # img is RGB, imwrite expects BGR\n\n if not expected_file.exists():\n raise AssertionError(\n f\"{expected_file} does not exist! 
Check newly produced img with a command like:\\n\\n feh {actual_file}\\n\\n\"\n )\n\n try:\n pytest.helpers.assert_img_equal(expected_file, img)\n except Exception as e:\n raise AssertionError(f\"{expected_file} differs from {actual_file}\") from e\n\n return _img_equal", "def is_image(mine=None, file=None):\n if file:\n mine = get_file_mine(file)\n print(mine)\n if mine:\n return mine.find('image') != -1\n\n return False", "def compare_image_hashes(image_hash, potential_duplicate_hash):\n\n return image_hash - potential_duplicate_hash < image_hash_comparison_cutoff", "def __lt__(self, img):\r\n ordering = self.config['algorithm_ordering']\r\n ordering = ordering[1:] if ordering.startswith('-') else ordering\r\n\r\n if ordering == \"filename\":\r\n return sorted([self.filename, img.filename])[0] == img.filename\r\n if ordering == 'width':\r\n return self.absolute_width <= img.absolute_width\r\n elif ordering == 'height':\r\n return self.absolute_height <= img.absolute_height\r\n elif ordering == 'area':\r\n return self.absolute_width * self.absolute_height <= img.absolute_width * img.absolute_height\r\n else:\r\n return max(self.absolute_width, self.absolute_height) <= max(img.absolute_width, img.absolute_height)", "def test_on_skimage_png(self):\n from_skimage = diffread(TEST_PNG)\n\n self.assertTupleEqual(from_skimage.shape, (256, 256))\n self.assertTrue(np.allclose(from_skimage, np.ones_like(from_skimage)))", "def rgb(self) -> bool:\n return self.image_shape[2] == 3", "def md5check(fname, md5fname):\n\tmd5fh = open(md5fname, \"r\")\n\treturn (md5sum(fname) == md5fh.readline())", "def verify_media(self):\n self.check_dataset_duplicate_ids(self.media)", "def test_check_wrong_image(self):\n result = analyzer.check_image_color(\"tests/test_files/non_exists.jpg\")\n self.assertEqual(result, \"Image not found\")", "def identify_duplicates(file1, outname, RA=\"RA\", DEC=\"DEC\", dist=1):\n command = (\n f'stilts tmatch1 matcher=sky values=\"{RA} {DEC}\" params={dist} '\n + f\"action=identify in={file1} out={outname}\"\n )\n try:\n subprocess.check_output(command, shell=True, executable=\"/bin/zsh\")\n return True\n except:\n shutil.copyfile(file1, outname)\n return False", "def is_valid_image(image):\n if image not in AVAILABLE_IMAGES.keys():\n return False\n\n return True", "def check_duplicates(self, file_path):\n\t\tif not file_path:\n\t\t\treturn file_path\n\t\tif not self.settings.get('deduplicate_files', True):\n\t\t\t# Deduplication disabled.\n\t\t\treturn file_path\n\t\twas_new, existing_path = hashjar.add_hash(file_path) # Check if the file exists already.\n\t\tif not was_new:\n\t\t\tprint(\"\\tFile already exists! Resolving...\")\n\t\t\t# Quick and dirty comparison, assumes larger filesize means better quality.\n\t\t\tif os.path.isfile(file_path) and os.path.isfile(existing_path):\n\t\t\t\tif os.path.getsize(file_path) > os.path.getsize(existing_path):\n\t\t\t\t\tprint('\\t\\tNew file was better quality. 
Removing old file.')\n\t\t\t\t\tos.remove(existing_path)\n\t\t\t\t\tfor ele in self.loader.get_elements_for_file(existing_path):\n\t\t\t\t\t\tele.remap_file(existing_path, file_path)\n\t\t\t\t\treturn file_path\n\t\t\t\telse:\n\t\t\t\t\tprint(\"\\tOld file was better quality, removing newer file.\")\n\t\t\t\t\tos.remove(file_path)\n\t\t\t\t\treturn existing_path\n\t\treturn file_path", "def check(self, grain=50):\r\n opengles.glDisable(GL_SCISSOR_TEST)\r\n self.s_flg = False\r\n opengles.glReadPixels(0, self.y0, self.ix, 1,\r\n GL_RGB, GL_UNSIGNED_BYTE,\r\n ctypes.byref(self.img))\r\n r0 = self.img[0:3]\r\n for i in xrange(0, self.img_sz, self.step):\r\n if self.img[i:(i+3)] != r0:\r\n return True\r\n\r\n return False", "def mapsMatch(m1,m2):\n same = True\n f1 = file(m1,'r').readlines()\n f2 = file(m2,'r').readlines()\n for i, row in enumerate(f1):\n row = row.strip().split()\n row2 = f2[i].strip().split()\n if row[0] <> row2[0]:\n\t same = False\n break\n return same", "def coherent(self):\n return self.uris.size == self.sockets.size", "def is_equal(image_a, image_b, tolerance=0.0):\n return image_diff_percent(image_a, image_b) <= tolerance", "def _assets_are_stale(self, sourcedirectory, cachedirectory):\n comparison = filecmp.dircmp(sourcedirectory, cachedirectory, [], [])\n if comparison.left_only or comparison.right_only:\n # We have files in one directory and not the other\n return True\n if comparison.diff_files:\n # Some of the files have changed\n return True\n\n return False", "def md5_match(file_path, reference_md5):\n\n with open(file_path, \"rb\") as f:\n\n data = f.read()\n\n file_md5 = md5(data).hexdigest()\n\n return file_md5 == reference_md5", "def compare(self, checksum):\n real_checksum = checksum\n if len(checksum) > self.hasher_size:\n real_checksum = checksum[0:self.hasher_size]\n afile = checksum[self.hasher_size:len(checksum)]\n self.path = os.path.join(self.path, afile)\n self.compute()\n return self.real_checksum == real_checksum", "def read_file(filename):\n\tprint(\"Beginning file read...\")\n\tmy_image = cv2.imread(filename)\n\treturn computer_hash(my_image)", "def __eq__(self, other):\n if not isinstance(other, ImageInfo):\n return False\n\n return self.__dict__ == other.__dict__", "def check_sub_image(self, ndvi_filename, input_path):\n rgb_filename = re.sub(\"BWNDVI\",\"RGB\",ndvi_filename)\n rgb_img = Image.open(self.get_file(os.path.join(input_path, rgb_filename),\n self.input_location_type))\n img_ok = check_image_ok(rgb_img, 0.05)\n return img_ok", "def check_already_extracted(video_parts):\n filename_no_ext, _ = video_parts\n return bool(os.path.exists(os.path.join(output_dir,\n filename_no_ext + '-0030.jpg')))", "def check_consistency(trained_model, filename):\n # First, check that the model supports consistency checking (has _source_hash).\n if not hasattr(trained_model, '_source_hash'):\n return True # No check was done (so we assume it's all fine).\n trained_source_hash = trained_model._source_hash\n with open(filename, 'r') as ff:\n code = ff.read()\n m = hashlib.sha256()\n m.update(code.encode())\n true_source_hash = m.hexdigest()\n return trained_source_hash == true_source_hash", "def test_consitency_manual(self):\n name = os.path.basename(self.cbf_filename)\n obj = fabio.open(self.cbf_filename)\n new = fabio.cbfimage.cbfimage(data=obj.data, header=obj.header)\n new.write(os.path.join(self.tempdir, name))\n other = fabio.open(os.path.join(self.tempdir, name))\n self.assertEqual(abs(obj.data - other.data).max(), 0, \"data are the same\")\n for key 
in obj.header:\n if key in[ \"filename\", \"X-Binary-Size-Padding\"]:\n continue\n self.assertTrue(key in other.header, \"Key %s is in header\" % key)\n self.assertEqual(obj.header[key], other.header[key], \"value are the same for key %s [%s|%s]\" % (key, obj.header[key], other.header[key]))", "def reversible(self) -> bool:\n xy_row = np.column_stack(\n (\n np.linspace(\n -self.imgsz[0] / (2 * self.f[0]),\n self.imgsz[0] / (2 * self.f[0]),\n int(self.imgsz[0]),\n ),\n np.zeros(int(self.imgsz[0])),\n )\n )\n dxy = self._distort(xy_row)\n continuous_row = np.all(dxy[1:, 0] >= dxy[:-1, 0])\n xy_col = np.column_stack(\n (\n np.zeros(int(self.imgsz[1])),\n np.linspace(\n -self.imgsz[1] / (2 * self.f[1]),\n self.imgsz[1] / (2 * self.f[1]),\n int(self.imgsz[1]),\n ),\n )\n )\n dxy = self._distort(xy_col)\n continuous_col = np.all(dxy[1:, 1] >= dxy[:-1, 1])\n return continuous_row and continuous_col" ]
[ "0.73973125", "0.6985823", "0.68735695", "0.6753808", "0.6718825", "0.67187357", "0.67060715", "0.66544104", "0.64797544", "0.6415226", "0.6404903", "0.6333785", "0.629946", "0.62671864", "0.6233922", "0.6232194", "0.61956316", "0.6156336", "0.60619223", "0.6051723", "0.60221314", "0.60185254", "0.60083586", "0.5999905", "0.5990362", "0.5974603", "0.593965", "0.59360605", "0.5934657", "0.59307516", "0.5928271", "0.59277403", "0.58991015", "0.589421", "0.58894527", "0.58879703", "0.58847433", "0.58817774", "0.58618927", "0.58563024", "0.58563024", "0.58527267", "0.5827585", "0.58193266", "0.5805843", "0.58049315", "0.58034694", "0.5799385", "0.5793126", "0.57897717", "0.5769511", "0.5766681", "0.5762456", "0.57545316", "0.57513934", "0.5749852", "0.5748375", "0.57408124", "0.573175", "0.57211345", "0.5720841", "0.57205623", "0.57183206", "0.5718258", "0.57088095", "0.57006985", "0.569202", "0.5686787", "0.5683639", "0.5683189", "0.56773704", "0.56708705", "0.5668705", "0.56630564", "0.5654363", "0.5645765", "0.5645644", "0.56443185", "0.5638958", "0.5631421", "0.56313866", "0.56264657", "0.5625926", "0.56181985", "0.56152946", "0.5606123", "0.5602853", "0.5599952", "0.559895", "0.5590131", "0.55857444", "0.5585567", "0.5576262", "0.5564011", "0.5560918", "0.5556057", "0.55540776", "0.5550908", "0.55478317", "0.55316335" ]
0.57149756
64
Compose html with blogger image urls
def compose_blogger_html(args, title, posts, imgdata, online_videos):
    for post in posts:
        for media in post.medias:
            if type(media) is PostImage:
                if media.uri not in imgdata:
                    print('Image missing: ', media.uri)
                else:
                    img_url, resized_url = imgdata[media.uri]
                    media.uri = img_url
                    media.resized_url = resized_url
            elif type(media) is PostVideo:
                if not online_videos:
                    print('Video missing: ', media.uri)
                else:
                    media.iframe = online_videos[0]
                    del online_videos[0]
            else:
                assert False
    return print_html(args, posts, title, '', target='blogger')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def replfunc(self, match):\n url = match.group(1)\n imgformat = url.split('.')[-1]\n if url.startswith('http'):\n data = urlopen(url).read()\n elif url.startswith('data'):\n img = '<img src=\"' + url + '\" ' + match.group(2) + ' />'\n return img\n else:\n with open(url, 'rb') as f:\n data = f.read()\n\n self.log.info(\"embedding url: %s, format: %s\" % (url, imgformat))\n b64_data = base64.b64encode(data).decode(\"utf-8\")\n if imgformat == \"svg\":\n img = '<img src=\"data:image/svg+xml;base64,' + \\\n b64_data + '\" ' + match.group(2) + '/>'\n elif imgformat == \"pdf\":\n img = '<img src=\"data:application/pdf;base64,' + \\\n b64_data + '\" ' + match.group(2) + '/>'\n else:\n img = '<img src=\"data:image/' + imgformat + \\\n ';base64,' + b64_data + '\" ' + match.group(2) + ' />'\n return img", "def image_preview(self):\r\n h = '<img src=\"%s\" alt=\"Campaign badge\"/>' % self.image.url\r\n return mark_safe(h)", "def image(self, link, title, alt):\n if not link.startswith(('http://', 'https://')):\n source_dir = os.path.dirname(self.source_path)\n link = os.path.abspath(os.path.join(source_dir, link))\n return '<img src=\"%s\" title=\"%s\" alt=\"%s\" />' % (link, title, alt)", "def create_HTML_a_img(link_url, image_url):\n img = '<img src=\"' + image_url + '\">'\n linked_image = create_HTML_a(link_url, img)\n return linked_image", "def website_create_body(website_info):\r\n body = \"\"\r\n body += H2 + website_info.title + END_H2\r\n body += '\\n' + P + website_info.content + END_P\r\n for image in website_info.images:\r\n if isinstance(image, str):\r\n body += '<img src=\"' + image + CLASS_CENTER\r\n elif isinstance(image, Image):\r\n body += '<img src=\"' + image.name + '\" width=\"' + image.size + CLASS_CENTER\r\n else:\r\n pass\r\n return body", "def url(self):\n\t\treturn self.base_url+\"{}/{}/{}.jpg\".format(self.template,self._escape(self.top_text),self._escape(self.bottom_text))+(\"?\"+\"&\".join([\"{}={}\".format(k,quote(self.kwargs[k])) for k in self.kwargs]) if self.kwargs else \"\")", "def embed_images(self):\n for img in self.book.xpath(\"//img[ not(starts-with(@src, 'data:')) and @src!= '']\"):\n img_src = img.attrib[\"src\"]\n img_raw = self.get_remote_content(img_src)\n if img_raw != None:\n img_64 = base64.b64encode(img_raw)\n file_info = os.path.splitext(img_src)\n ext = file_info[1].replace(\".\", \"\")\n ext = re.sub(\"\\?.*$\", \"\" , ext)\n \n if ext == \"svg\":\n svg = html.fromstring(img_raw.decode(\"utf-8\"))\n img.clear()\n img.tag = \"svg\"\n img[:] = [svg]\n else:\n img.set(\"src\", \"data:image/{};base64,{}\".format(ext, img_64.decode(\"utf-8\")))", "def prepare_for_blogger(args):\n title, posts = parse_markdown(os.path.join(args.root, 'index.md'))\n online_images, online_videos = online_images_url(args)\n\n if args.check_images and check_images(args, posts, online_images) is False:\n pass\n\n html = compose_blogger_html(args, title, posts, online_images, online_videos)\n\n if args.full is False:\n html = re.search('<body>(.*)?</body>', html, flags=re.DOTALL).group(1)\n html = re.sub('<script>.*?</script>', '', html, flags=re.DOTALL)\n html = STYLE.replace('%%', '%') + html\n\n if args.dest:\n with open(args.dest, 'wt', encoding='utf-8') as f:\n f.write(html)\n else:\n clipboard.copy(html)", "def create_html(pic_info,sum_pic,upload_path,yun_link=('1','2')):\n save_file=pic_info+'.txt'\n content=\"\"\"\n <p>\n <img src=\"%s%s\" style=\"\" title=\"%s\"/>\n </p>\n <p>\n <img src=\"%s%s\" style=\"\" title=\"%s\"/>\n </p>\n <p>\n <img src=\"%s%s\" style=\"\" 
title=\"%s\"/>\n </p>\n <p>\n <img src=\"%s%s\" style=\"\" title=\"%s\"/>\n </p>\n <p>\n <span style=\"color: #FF0000; font-size: 24px;\">link: \n </span>\n <a href=\"%s\" target=\"_blank\" \n style=\"font-size: 24px; text-decoration: underline;\">\n <span style=\"font-size: 24px;\">%s\n </span>\n </a> \n <span style=\"font-size: 24px;\">\n <span style=\"color: #FF0000; font-size: 24px;\">code:\n </span>\n %s\n </span>\n </p>\\n\\n\\n\\n\\n\\n\\n\\n\\n\n \"\"\"%(upload_path,sum_pic[0],sum_pic[0],upload_path,sum_pic[1],sum_pic[1],\n upload_path,sum_pic[2],sum_pic[2],upload_path,sum_pic[3],sum_pic[3],\n yun_link[0],yun_link[0],yun_link[1])\n with open(save_file, 'w') as f:\n f.write(content)\n f.close()", "def correct_img_links(body_main_content, schema_name, list_name_image):\n for name_image in list_name_image:\n body_main_content = body_main_content.replace(\n \"src=\\\"\" + name_image + \"\\\"\",\n \"src=\\\"{% static \\\"schema_viewer/oxygen/\" + schema_name + \"/\" + name_image + \"\\\" %}\\\"\"\n )\n return body_main_content", "def banner_wrapper(banner_url):\n # so simple\n return '{url}<img src=\"{url}\" alt=\"{alt}\">'.format(\n url=banner_url,\n alt='Banner'\n )", "def image(self, src, title, text):\n src = escape_link(src)\n text = escape(text, quote=True)\n if title:\n title = escape(title, quote=True)\n html = '<img src=\"%s\" alt=\"%s\" title=\"%s\"' % (src, text, title)\n else:\n html = '<img src=\"%s\" alt=\"%s\"' % (src, text)\n if self.options.get('use_xhtml'):\n return '%s />' % html\n return '%s>' % html", "def image_preview(self):\r\n h = '<img src=\"%s\" alt=\"%s\"/>' % (self.image_resized_url, self.title)\r\n return mark_safe(h)", "def getNewsIconURL(newsBrain):", "def embed_images(self, html):\n if not self.SUPPORT_EMBED_IMAGES:\n raise RuntimeError('%r does not support embed_images' % type(self))\n\n return self.RE_IMG.sub(self._embed_image, html)", "def get_image(result):\n article_id = result['id']\n id_ = article_id[14:]\n href = article_id[:14]\n\n #FIXME: not working\n image_url = \"http://www.jpress.nli.org.il/Olive/APA/NLI_heb/get/GetImage.ashx?kind=block&href=%s&id=%s&ext=.png\" %(href, id_)\n \n return image_url", "def get_image_url():", "def thumbnail_generator():\n website_url = json.loads(request.data.decode())['url']\n try:\n webpage, message = url_preview.send_request(website_url)\n if webpage is not None:\n #Construct the soup object\n soup_object = url_preview.get_soup_object(webpage)\n #Get the title of the artcile\n title = url_preview.get_title(soup_object)\n #Get the website of the article\n website_name = url_preview.get_url(soup_object).rsplit(\".\", 1)[0]\n if website_name is None:\n website_name = website_url.split(\"//\", 1)[1].split(\"/\", 1)[0].rsplit(\".\", 1)[0]\n\n #Get the description of the article\n description = url_preview.get_description(soup_object)\n\n #Get the published date and time of the article\n date_time = url_preview.get_date_time(website_url)\n\n #Get the link to the preview image\n image_url = url_preview.get_preview_image(soup_object)['content']\n\n #Get the link to the favicon\n favicon_url = url_preview. 
get_favicon(soup_object)\n\n return render_template(\n \"success.html\",\n urlx=website_url,\n title=title,\n site_name=website_name,\n description=description,\n date_time=date_time,\n preview_image=image_url,\n favicon=favicon_url\n )\n except Exception as exp:\n return render_template('error.html', msg=str(exp))", "def get_images(self,soup,Images):\n \n img=soup.find_all('a',href=re.compile(\"/photo.php?fbid=\"))\n img1=soup.find_all('a',href=re.compile(\"/photo\"))\n m=' '\n if img !=[]:\n img_href='https://www.facebook.com'+img[0]['href']\n m+=img_href+'\\n'\n \n elif img1 !=[]:\n img_href='https://www.facebook.com'+img1[0]['href']\n m+=img_href+'\\n'\n \n else:\n img=soup.find_all('a',href=re.compile(\"pcb\"))\n if img !=[]:\n for i in img:\n img_href='https://www.facebook.com'+i['href']\n m+=img_href+'\\n' \n \n \n else:\n img=soup.find_all('a',href=re.compile(\"photos\"))\n if img !=[]:\n for i in img:\n img_href='https://www.facebook.com'+i['href']\n m+=img_href+'\\n'\n \n Images.append(m)\n \n return Images", "def hook_image_tag(self, parser, space, name):\n link = name\n caption = name\n params = {}\n\n # Parse the inner syntax, e.g. [[Image:src|option=val|caption]]\n separator = name.find('|')\n items = []\n if separator != -1:\n items = link.split('|')\n link = items[0]\n # If the last item contains '=', it's not a caption\n if items[-1].find('=') == -1:\n caption = items[-1]\n items = items[1:-1]\n else:\n caption = link\n items = items[1:]\n\n # parse the relevant items\n params = self._buildImageParams(items)\n img_path = self._getImagePath(link)\n\n template = jingo.env.get_template('wikiparser/hook_image.html')\n r_kwargs = {'img_path': img_path, 'caption': caption, 'params': params}\n return template.render(**r_kwargs)", "def format_post_background(post: dict) -> str:\n parsed = urllib.parse.urlparse(post['url'])\n logger.debug(f\"Sending post: {post['name']}\")\n if 'i.redd.it' in post['url']:\n return f\"\"\"\n <div id=\"{post['name']}\" class=\"col-sm-3 col-lg-3 col-xxl-3\">\n <img class=\"\" style=\"width: 100%;\" src=\"{post['url']}\">\n </div>\n \"\"\"\n elif 'v.redd.it' in post['url']:\n if post['media'] is not None:\n return \"\"\n return f\"\"\"\n <div id=\"{post['name']}\" class=\"col-sm-3 col-lg-3 col-xxl-3\">\n <video style=\"width: 100%;\" data-dashjs-player autoplay src=\"{post['media']['reddit_video']['dash_url']}\" controls></video>\n </div>\n \"\"\"\n else:\n logger.error(f\"Error no media for v.redd.it link: {post['url']}\")\n return \"\"\n elif 'imgur' in post['url'] and ('gif' in post['url'] or 'mp4' in post['url']):\n return \"\"\n imgur_id = parsed.path.split('.')[0].split('/')[-1]\n return f\"\"\"\n <div id=\"{post['name']}\" class=\"col-sm-3 col-lg-3 col-xxl-3\">\n <video controls poster=\"//i.imgur.com/{imgur_id}.jpg\" preload=\"auto\" autoplay=\"autoplay\" muted=\"muted\" loop=\"loop\" webkit-playsinline=\"\" style=\"width: 100%; height: 100%;\">\n <source src=\"//i.imgur.com/{imgur_id}.mp4\" type=\"video/mp4\">\n </video>\n </div>\n \"\"\"\n elif 'imgur' in post['url']:\n imgur_id = parsed.path.split('.')[0].split('/')[-1]\n return f\"\"\"\n <div id=\"{post['name']}\" class=\"col-sm-3 col-lg-3 col-xxl-3\">\n <img class=\"\" style=\"width: 100%;\" src=\"//i.imgur.com/{imgur_id}.jpg\">\n </div>\n \"\"\"\n elif 'redgif' in post['url']:\n return \"\"\n redgifs_id = parsed.path.split('.')[0].split('/')[-1]\n return f\"\"\"\n <div id=\"{post['name']}\" class=\"col-sm-3 col-lg-3 col-xxl-3\">\n <div style='position:relative; padding-bottom:88.67%;'>\n 
<iframe src='https://redgifs.com/ifr/{redgifs_id}' frameborder='0' scrolling='no' width='100%' height='100%' style='position:absolute;top:0;left:0;' allowfullscreen></iframe>\n </div>\n </div>\n \"\"\"\n elif 'gfycat' in post['url']:\n return \"\"\n else:\n thumbnail = post.get('thumbnail', '/favicon.ico')\n thumbnail = thumbnail if thumbnail != '' else '/favicon.ico'\n return f\"\"\"\n <div id=\"{post['name']}\" class=\"col-sm-3 col-lg-3 col-xxl-3\">\n <a href=\"{post['url']}\"><img class=\"\" style=\"width: 100%;\" src=\"{thumbnail}\">\n </div>\n \"\"\"\n return \"\"", "def img(self, obj):\n return mark_safe(\n f\"<img src='/{obj.image.url}' width='{obj.image.width}' height='{obj.image.height}' />\"\n )", "def show_me():\n # Scumbag thumbnail code\n try:\n from PIL import Image\n except ImportError:\n pass\n else:\n filename = os.path.join(app.static_folder, 'img', 'badumtss.png')\n image = Image.open(filename)\n\n return render_template('show_me.html')", "async def img(ctx, message):\n \"\"\":param: ctx\"\"\"\n \"\"\":param: message\"\"\"\n \"\"\"return image url\"\"\"\n link_list = []\n\n url = \"http://imgur.com/search?q=\" + message\n response = urlopen(url)\n html = response.read()\n soup = BeautifulSoup(html, \"lxml\")\n for a in soup.find_all('a', href=True):\n if((a['href'][0:9]) == \"/gallery/\"):\n link_list.append(\"https://imgur.com/\" + a['href'])\n if(len(link_list) >=1):\n random_num = random.randint(0, len(link_list) - 1)\n await bot.say(link_list[random_num])\n else:\n await bot.say(\"there is no contente for \"+message)", "def get_content(self):\n\n self.content = self.book.get_template('cover')\n\n tree = parse_string(super(EpubCoverHtml, self).get_content())\n tree_root = tree.getroot()\n\n images = tree_root.xpath('//xhtml:img', namespaces={'xhtml': NAMESPACES['XHTML']})\n\n images[0].set('src', self.image_name)\n images[0].set('alt', self.title)\n\n tree_str = etree.tostring(tree, pretty_print=True, encoding='utf-8', xml_declaration=True)\n\n return tree_str", "def parse_template(data, template):\n img_html = \"\"\"<div class=\"thumb-wrap\"><div class=\"thumb-holder\"></div><a href=\"{{URL}}\" target=\"_top\"><div class=\"thumb-img\" style=\"background-image:url('{{IMG}}');\"></div></a></div>\"\"\"\n template = template.replace('{{URL}}', data['link'].replace('http:','https:'))\n template = template.replace('{{URLX}}', data['link'])\n template = template.replace('{{TITLE}}', data['title'])\n #template = template.replace('{{BLURB}}', data['summary'])\n img_html = img_html.replace('{{URL}}', data['link'].replace('http:','https:'))\n if hasattr(data, 'tags') and len(data['tags']) > 0:\n template = template.replace('{{SECTION}}', data['tags'][0]['term'])\n else:\n template = template.replace('<h2><a href=\"{{URL}}\" target=\"_top\">{{SECTION}}</a></h2>', '')\n if hasattr(data, 'media_content') and len(data['media_content']) > 0:\n template = template.replace('{{IMG}}', '%s?w=150' % data['media_content'][0]['url'].replace('http:','https:'))\n else:\n template = template.replace(img_html, '')\n\n return template", "def index():\n\n return \"\"\"\n <div>\n <h1> Image Captioning REST API </h1>\n <h3> The following API end points are valid </h3>\n <ul>\n <h4> Inception V3 </h4>\n <li> <code>/inception/v3/ping </code> - <br/>\n <b> Description : </b> checks availability of the service. 
returns \"pong\" with status 200 when it is available\n </li>\n <li> <code>/inception/v3/caption/image</code> - <br/>\n <table>\n <tr><th align=\"left\"> Description </th><td> This is a service that can caption images</td></tr>\n <tr><th align=\"left\"> How to supply Image Content </th></tr>\n <tr><th align=\"left\"> With HTTP GET : </th> <td>\n Include a query parameter <code>url </code> which is an http url of JPEG image <br/>\n Example: <code> curl \"localhost:8764/inception/v3/caption/image?url=http://xyz.com/example.jpg\"</code>\n </td></tr>\n <tr><th align=\"left\"> With HTTP POST :</th><td>\n POST JPEG image content as binary data in request body. <br/>\n Example: <code> curl -X POST \"localhost:8764/inception/v3/caption/image\" --data-binary @example.jpg </code>\n </td></tr>\n </table>\n </li>\n <ul>\n </div>\n \"\"\"", "def get_thumbnail_url():", "def get_image_link():\n image_links = set()\n supplemented_keyword = urllib.parse.quote(\n supplemented_keywords[random.randint(0,\n len(supplemented_keywords) - 1)],\n safe='')\n main_keyword = urllib.parse.quote(\n main_keywords[random.randint(0,\n len(main_keywords) - 1)], safe='')\n\n # print('the theme of cats: ' + supplemented_keyword)\n\n search_query = (main_keyword + ' ' + supplemented_keyword).replace(\n ' ', '%20')\n url = 'https://www.google.com/search?q=' + \\\n search_query + '&source=lnms&tbm=isch'\n image_links = image_links.union(parse_page(url))\n\n image_link = list(image_links)[random.randint(0, len(image_links) - 1)]\n # print('link:' + image_link)\n\n while 'https://' not in image_link or r'\\\\u' in image_link or '.jpg' not in image_link:\n image_link = list(image_links)[random.randint(0, len(image_links) - 1)]\n # print('link:' + image_link)\n\n return image_link", "def prettyformat(self):\n \n import re\n\n html = self.get_htmlsrc()\n if type(html) == type([]):\n html = html[0]\n if type(html) != type(\"\"):\n try:\n html = str(html)\n except:\n html = html.__str__()\n \n tmp = BeautifulSoup(html)\n base = self.target_baseurl()\n# aitems = tmp.findAll(\"a\",href=re.compile(\"^\\/\"))\n aitems = tmp.findAll(\"a\",href=re.compile(\"^[^hH]\"))\n for i in aitems:\n u = i['href']\n if u[0] != '/':\n i['href'] = base + '/' + u\n else: \n i['href'] = base + u\n# imgitems = tmp.findAll(\"img\",src=re.compile(\"^\\/\"))\n imgitems = tmp.findAll(\"img\",src=re.compile(\"^[^hH]\"))\n for j in imgitems:\n v = j['src']\n if v[0] != '/':\n j['src'] = base + '/' + v\n else: \n j['src'] = base + v\n return tmp", "def wiki_image(pagetext):\n images = [i for i in pagetext.images if i not in EXCLUDED_IMAGES]\n if len(images) > 0:\n return images[0]\n else:\n return ''", "def parse_content(self, node):\n\n if node is None:\n return ''\n\n for image_node in node.find_all('img'):\n source = image_node.get('src')\n image_node['src'] = self.get_link(source)\n\n return str(node)", "def _handle_img_tag(self, attrs):\n alt = \"\"\n image_format = \"\"\n image_id = 0\n for key, value in attrs:\n if key == \"alt\":\n alt = value\n if key == \"class\":\n image_format = value.split()[1].replace(\"-\", \"\")\n if key == \"src\":\n filename, _, extension = value.split(\"/\")[-1].split(\".\")\n image_id = ImageModel.objects.get(file__endswith=f\"{filename}.{extension}\").id\n return f\"<embed alt='{alt}' embedtype='image' format='{image_format}' id='{image_id}'/>\"", "def bb_forum(hit):\n try:\n forum_slug = hit.group(1)\n f = Forum.objects.get(slug=forum_slug)\n return '<a href=\"%s\"><img src=\"%snewspaper.png\" alt=\"forum\" border=\"0\" /> 
%s</a>' % (f.get_absolute_url(), settings.MEDIA_URL, f)\n except:\n return \"[forum]%s[/forum]\" % (forum_slug)", "def image_capture_demo():\n return render_template('image_capture_demo.html')", "def avatar_preview(self):\r\n h = '<img src=\"%s\" alt=\"%s\"/>' % (self.image_avatar_url, self.title)\r\n return mark_safe(h)", "def l10n_img(ctx, url):\n return static(l10n_img_file_name(ctx, url))", "def get_html(self):\r\n context = {\r\n 'display_name': self.display_name_with_default,\r\n 'instructions_html': self.instructions,\r\n 'annotation_storage': self.annotation_storage_url,\r\n 'token': retrieve_token(self.user, self.annotation_token_secret),\r\n 'tag': self.instructor_tags,\r\n 'openseadragonjson': self.openseadragonjson,\r\n }\r\n\r\n return self.system.render_template('imageannotation.html', context)", "def preview_tag(self, pid):\n data = self.api.get_product_odata(pid)\n url = data['quicklook_url']\n user = os.environ['HUB_USER']\n password = os.environ['HUB_PASS']\n r = requests.get(url, auth=HTTPBasicAuth(user, password))\n c = r.content\n c64 = base64.b64encode(c)\n tag_template = '<img width=\"150\" height=\"150\" src=\"data:image/jpg;base64, {}\">'\n return tag_template.format(c64.decode('utf-8'))", "def test_lazy_images(self):\n sample = load_sample('wired.sample.html')\n doc = Document('http://www.wired.com/design/2014/01/will-influential-ui-design-minority-report/', sample)\n article = doc.get_clean_article()\n self.assertIn('<img src=\"http://www.wired.com/images_blogs/design/2014/01/her-joaquin-phoenix-41-660x371.jpg\"', article)", "def image(self, text):\n pattern = re.compile(r\"\"\"\n (?:[\\[{])? # pre\n \\! # opening !\n (\\<|\\=|\\>)? # optional alignment atts\n (%s) # optional style,class atts\n (?:\\. )? # optional dot-space\n ([^\\s(!]+) # presume this is the src\n \\s? # optional space\n (?:\\(([^\\)]+)\\))? # optional title\n \\! # closing\n (?::(\\S+))? 
# optional href\n (?:[\\]}]|(?=\\s|$)) # lookahead: space or end of string\n \"\"\" % self.c, re.U | re.X)\n return pattern.sub(self.fImage, text)", "def add_image_to_html(self, image_path):\r\n file_object = open(self.file_name, 'a+')\r\n html_content = f'<div><img src={image_path} width=\"500\" height=\"300\"></div>'\r\n file_object.write(html_content)", "def process_image_url(value, field):\n if field.width:\n if not value:\n return u\"无\"\n return mark_safe(\"\"\"\n <a href=\"{0}\" target=\"_blank\"><img src=\"{0}\" width=\"{1}\" a>\n \"\"\".format(absolute_media_path(value), field.width))\n # only show url address.\n elif value:\n shorten_value = ''\n if len(value) > 20:\n shorten_value = value[0:12] + \"...\"\n return mark_safe(\"\"\"\n <a href=\"{0}\" target=\"_blank\" title=\"{0}\" >{1}</a>\n \"\"\".format(absolute_media_path(value), shorten_value if shorten_value else value))\n else:\n return \"\"", "def getimgs():", "def make_page(keyword, html):\n output = []\n addline = output.append\n\n addline('<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01//EN\" \"http://www.w3.org/TR/html4/strict.dtd\">')\n addline('<html>')\n addline('<head>')\n addline('<title>Tweets for #%s</title>' % (keyword))\n addline('<style type=\"text/css\">')\n addline('div.tweet_container { float: right; width: 400px; font-size: 0.8em; border: 2px solid darkblue; padding: 8px; height: 80%; overflow-y: scroll; }')\n addline('div.tweet_container h3 { margin: 0; padding: 0; text-align: center; color: darkblue; margin-bottom: 6px; padding-bottom: 6px; border-bottom: 2px solid darkblue; }')\n addline('div.tweet { clear: both; }')\n addline('div.tweetphoto { float:left; width: auto; margin: 0; padding: 0; }')\n addline('div.tweetphoto img { border: none; width: 48px; height: 48px; display: block; margin: 0; margin-bottom: 24px; margin-right: 8px; padding: 0; }')\n addline('div.tweetphoto a > img { border: 1px solid black; }')\n addline('</style>')\n addline('</head>')\n addline('<body>')\n\n addline(html)\n\n addline('</div>')\n addline('</body>')\n addline('</html>')\n return '\\n'.join(output)", "def process_images(text):\n # if text != None:\n if text is not None:\n soup = BeautifulSoup(str(text), 'html.parser')\n img = soup.img\n try:\n image = img['title']\n return image\n except (TypeError, KeyError):\n # print(img)\n pass", "def background_image(self, **kwargs):\n try:\n asset = self.app.module_map.uploader.get(self.barcamp.background_image)\n except AssetNotFound:\n asset = None\n if not asset:\n return u\"\"\n v = asset.variants['full']\n url = self.app.url_for(\"asset\", asset_id = v._id)\n amap = html_params(**kwargs)\n return \"\"\"<img src=\"%s\" width=\"%s\" height=\"%s\" %s>\"\"\" %(\n url,\n v.metadata['width'],\n v.metadata['height'],\n amap)", "def get_image_url(article):\n get_image = article.find_all(\"img\")[0]\n return get_image.get(\"src\").replace(\"../../\", \"http://books.toscrape.com/\")", "async def images(self, ctx, *, query: str=None):\n # Handle empty query\n if query is None:\n return await ctx.error('Please provide a query!')\n\n # Using these specific headers and \"lnms\" as source, will provide divs with \"rg_meta\" classes,\n # The modern image search page being JS rendered, data in these divs are jsons with raw image URLs\n # Old image search pages, only have thumbnails and a direct link to websites\n params = {'q': quote_plus(query), 'source': 'lmns', 'tbm': 'isch'}\n async with self.aiohttp_session.get(self.url, params=params, headers=self.image_headers) as r:\n html = await 
r.text()\n\n # Healthy\n soup = BeautifulSoup(html, 'lxml')\n\n # Go over 4 items, json.loads the item text, and grab \"ou\" probably stands for \"original url\"\n images = []\n for i, item in enumerate(soup.select('div.rg_meta')[:4]):\n js = json.loads(item.text)\n images.append((f\"{i+1}. {js['st']} - {js['s']}\", js[\"ou\"]))\n newl = '\\n'\n await ctx.message.edit(content=f\"```py\\n{newl.join([x[0] for x in images])}\"\n f\"\\n# Choose the appropriate number or type 0 to leave\\n```\")\n\n def check(m):\n return m.author == ctx.author and m.content.isdigit() and m.channel == ctx.channel\n message = await self.bot.wait_for('message', check=check)\n if message.content == \"0\":\n await message.delete()\n return await ctx.message.delete()\n choice = int(message.content) - 1\n await message.delete()\n await ctx.message.edit(content=images[choice][1])", "def get_image_qm(html_src, todir):\n #print url\n\n img_url, title = img_details(html_src)\n \n r = requests.get(img_url)\n with open(todir+title+'.jpg','wb') as f:\n f.write(r.content)", "def make_content(museum, image_url, image_name, image_artist, filename):\n message = \"From the \" + museum\n # if image_name is not None:\n # message += \" with title \" + image_name\n if image_artist is not None:\n message += \" by \" + image_artist\n\n r = requests.get(image_url)\n if r.status_code == 200:\n with open(filename, mode=\"wb\") as image:\n for chunk in r:\n image.write(chunk)\n else:\n return None\n return (message)", "def html_to_text_img(url):\n if url is '' or url is None:\n return None\n try:\n html = urlopen(url)\n except HTTPError as e:\n print(e)\n return None\n except URLError as e:\n print(e)\n return None\n\n try:\n soup = BeautifulSoup(html.read(), \"html.parser\")\n except AttributeError as e:\n print(e)\n return None\n\n title = soup.find(\"h1\").get_text()\n text = ''\n ps = soup.find(\"div\", {\"class\": \"article gtm-click\"}).find_all('p')\n for p in ps:\n text += ''.join(p.get_text())\n imgs = soup.select(\"div.article__image img\")\n img_url = imgs[0]['data-src']\n\n return title, text, img_url", "def get_images(self, article: BeautifulSoup):\n images = []\n content = article.select_one(self.parsing_template.content)\n\n if content:\n body_images = content.select(self.parsing_template.image_element)\n else:\n body_images = None\n\n if body_images:\n for element in body_images:\n\n img = element.find('img')\n if not img:\n continue\n url = img.get(self.parsing_template.image_attribute) # TODO format url correctly\n\n try:\n text = self.get_text(element, self.parsing_template.image_text)\n except IndexError:\n text = ''\n\n try:\n photographer = self.get_text(element, self.parsing_template.image_photographer)\n except IndexError:\n photographer = ''\n\n # Image text and photographer is not separated.\n # Tries to separate out the photographer\n if self.parsing_template.photographer_delimiter:\n if text and not photographer:\n text, photographer = self.parse_photographer(text, text)\n if photographer:\n text, photographer = self.parse_photographer(text, photographer)\n\n if url:\n if photographer:\n # Removes unwanted text in the photographer\n for replace in self.parsing_template.photograph_ignore_text:\n photographer = photographer.replace(replace, '')\n photographer = photographer.replace('/', ',')\n\n if len(text) > 255:\n text = text[:254]\n\n # Separate each photograph\n photographers = []\n for photograph in photographer.split(','):\n photographer_name_split = list(filter(lambda x: x or x != ' ', photograph.split(' 
')))\n if photographer_name_split:\n if len(photographer_name_split) == 1:\n lastName = photographer_name_split[0].strip(' ').strip('.')\n firstName = ''\n else:\n firstName = photographer_name_split[0].strip(' ')\n lastName = photographer_name_split[1].strip(' ').strip('.')\n photographers.append(Photographer(firstName=firstName, lastName=lastName))\n\n images.append((ArticleImage(url=url, text=text), photographers))\n\n return images", "def logosmall(self):\n try:\n asset = self.app.module_map.uploader.get(self.barcamp.logo)\n except AssetNotFound:\n asset = None\n if not asset:\n return u\"\"\n v = asset.variants['medium_user']\n url = self.app.url_for(\"asset\", asset_id = v._id)\n return \"\"\"<a href=\"%s\"><img src=\"%s\" width=\"%s\" height=\"%s\"></a>\"\"\" %(\n self.handler.url_for(\"barcamps.index\", slug = self.barcamp.slug),\n url,\n v.metadata['width'],\n v.metadata['height'])", "def insert_image(self, image64: str, class_txt: str, height: int, width: int) -> str:\n if (height is None) & (width is None):\n image = \"\"\"<div class=\"text-center\">\n <img class=\"resize\" src=\"data:image/png;base64, \"\"\" + image64 + \"\"\"\"/>\\n\n </div>\"\"\"\n else:\n image = \"\"\"<div>\n <img class=\"\"\" + class_txt + \"\"\" src=\"data:image/png;base64, \"\"\" + image64 + \"\"\"\"\" height=\"\"\" + str(\n height) + \"\"\" width=\"\"\" + str(\n width) + \"\"\"/>\\n\n </div>\n \"\"\"\n\n self.html_doc = self.html_doc + image\n return self.html_doc", "def extract_images_url(url, source):\n if source == \"mangaseeonline\":\n r = s.post(\n \"http://playwright:5000/scrape\",\n json={\n \"url\": url.replace(\"-page-1\", \"\"), \"wait\": 1}\n )\n tree = html.fromstring(r.text)\n return tree.xpath('//*[@id=\"TopPage\"]/descendant::img/@src')\n if source == \"nettruyen\":\n r = s.get(\n settings.SPLASH_URL, params={\n \"url\": url.replace(\"-page-1\", \"\"), \"wait\": 1}\n )\n tree = html.fromstring(r.text)\n return tree.xpath('//*[@class=\"reading-detail box_doc\"]/div/img/@src')\n if source == \"doctruyen3q\":\n r = s.get(\n settings.SPLASH_URL, params={\"url\": url, \"wait\": 1}\n )\n tree = html.fromstring(r.text)\n return tree.xpath('//*[contains(@id, \"page_\")]/img/@src')\n if source == \"truyenkinhdien\":\n r = s.get(\n settings.SPLASH_URL.replace(\"render.html\", \"execute\"),\n params={\"url\": url, \"lua_source\": lua_script, \"wait\": 1},\n )\n tree = html.fromstring(r.json()[\"html\"])\n return tree.xpath(\n '//*[@class=\"sgdg-gallery\"]/a[not(contains(@style,\"display:none\"))]/img/@src'\n )", "def mk_image_tag(self, uri: str) -> str:\n\n data_uri, width, height = self.webserver.get_image(f'{uri}&features=unstable')\n return f'<img src=\"{data_uri}\" width=\"{width}\" height=\"{height}\"/>'", "def get_album_art_url(html):\n\treturn re.findall('img src=\"(.*?)\" width=\"500\"', html)[0]", "def injectImage(self, img_info, open_url):\n injected_img = None\n\n if img_info.size/1024 < 256:\n encoded = base64.b64encode(open_url.read())\n injected_img = ('<img src=\"data:image/jpeg;charset=utf-8;base64,' +\n str(encoded) +\n '\" %s />' % self.getModifiers(img_info))\n else:\n image = Image.open(StringIO.StringIO(open_url.read()))\n image.thumbnail((self.cfg().chatimg.max_width, self.cfg().chatimg.max_height), Image.ANTIALIAS)\n trans = StringIO.StringIO()\n image.save(trans, format=\"JPEG\")\n encoded = base64.b64encode(trans.getvalue())\n injected_img = ('<img src=\"data:image/jpeg;charset=utf-8;base64,' +\n str(encoded) +\n '\" />')\n\n return injected_img", "def 
add_three_images(document_text, image_link_template, image_folder, images, width):\n for idx in range(3):\n document_text += \"\\n\"\n image_info = r'<td>{0}</td>'.format(image_link_template.format(\n os.path.join(image_folder, images[idx]), width))\n document_text = add_document_text(document_text, image_info)\n return document_text", "def add_to_html(self, news: dict) -> str:\n\n add_to_html_file = \"\"\n add_to_html_file += f\"<h2>Title: {news['Title']}</h2>\\n\"\n add_to_html_file += f\"<a href={news['Link']}>Link to news</a><br>\\n\"\n add_to_html_file += f\"<p>PubDate: {news['PubDate']}</p>\\n\"\n add_to_html_file += f\"<p>Source: {news['Source']}</p>\\n\"\n full_path_to_image = f\"{self.full_path_to_image_cache}{os.sep}{news['ImageCacheName']}\"\n add_to_html_file += f\"<img height='120' src='{full_path_to_image}' alt='No image'>\\n<br>\\n<br>\\n\"\n\n return add_to_html_file", "def build_md_once(self, index, tid):\n url = self.joint_url(tid)\n title_json = proxy_req(url, 1)\n if not title_json:\n if can_retry(url, index):\n self.build_md_once(index, tid)\n return\n content = BeautifulSoup(\n title_json['content'], 'html.parser').find_all('div')\n text = []\n img_href = []\n img_id = 1\n ttid = 1\n img_title = self.find_title(index).split('/')[1][:-3]\n for word in content:\n temp_text = ''\n if word.span and len(word.span.text) and not word.span.text[0].isdigit:\n temp_text = '## ' + word.span.text\n ttid = 1\n if word.img:\n temp_text = '![image](img/' + img_title + str(img_id) + '.jpg)'\n img_href.append(word.img['src'].replace('https', 'http'))\n img_id += 1\n\n if not len(temp_text):\n temp_text = word.text\n if len(temp_text) and temp_text[0].isdigit():\n temp_text = str(ttid) + '. **' + \\\n ' '.join(temp_text.split('\\xa0')[1:]).strip() + '**'\n ttid += 1\n if len(temp_text) and temp_text[0:2] == '//':\n temp_text = str(ttid) + '. **' + \\\n ' '.join(temp_text.split('\\xa0')[2:]).strip() + '**'\n ttid += 1\n if len(temp_text) and (temp_text[0] == '¥' or temp_text[0] == '€'):\n temp_text = '<a>' + temp_text + '</a>'\n text.append(temp_text)\n with codecs.open(data_dir + self.find_title(index), 'w', encoding='utf-8') as f:\n f.write('\\n'.join(text))\n self.img_map[index] = img_href\n print(index, len(img_href))", "def get_image_url(img):\n # element['data-src'] and element.get('data-src') doesn't work\n for k, v in img.items():\n if k == 'data-src':\n # https://t.nhentai.net/galleries/<gallerycode>/<page#>t.<extension>\n # https://i.nhentai.net/galleries/<gallerycode>/<page#>.<extension>\n return v[:8] + 'i' + v[9:32] + v[32:].replace('t.', '.', 1)", "def gallery():\n return render('base.html')", "def _repr_html_(self):\n import io\n import base64\n from PIL import Image\n\n library_name = \"vedo.assembly.Assembly\"\n help_url = \"https://vedo.embl.es/docs/vedo/assembly.html\"\n\n arr = self.thumbnail(zoom=1.1, elevation=-60)\n\n im = Image.fromarray(arr)\n buffered = io.BytesIO()\n im.save(buffered, format=\"PNG\", quality=100)\n encoded = base64.b64encode(buffered.getvalue()).decode(\"utf-8\")\n url = \"data:image/png;base64,\" + encoded\n image = f\"<img src='{url}'></img>\"\n\n # statisitics\n bounds = \"<br/>\".join(\n [\n vedo.utils.precision(min_x, 4) + \" ... 
\" + vedo.utils.precision(max_x, 4)\n for min_x, max_x in zip(self.bounds()[::2], self.bounds()[1::2])\n ]\n )\n\n help_text = \"\"\n if self.name:\n help_text += f\"<b> {self.name}: &nbsp&nbsp</b>\"\n help_text += '<b><a href=\"' + help_url + '\" target=\"_blank\">' + library_name + \"</a></b>\"\n if self.filename:\n dots = \"\"\n if len(self.filename) > 30:\n dots = \"...\"\n help_text += f\"<br/><code><i>({dots}{self.filename[-30:]})</i></code>\"\n\n allt = [\n \"<table>\",\n \"<tr>\",\n \"<td>\",\n image,\n \"</td>\",\n \"<td style='text-align: center; vertical-align: center;'><br/>\",\n help_text,\n \"<table>\",\n \"<tr><td><b> nr. of objects </b></td><td>\"\n + str(self.GetNumberOfPaths())\n + \"</td></tr>\",\n \"<tr><td><b> position </b></td><td>\" + str(self.GetPosition()) + \"</td></tr>\",\n \"<tr><td><b> diagonal size </b></td><td>\"\n + vedo.utils.precision(self.diagonal_size(), 5)\n + \"</td></tr>\",\n \"<tr><td><b> bounds </b> <br/> (x/y/z) </td><td>\" + str(bounds) + \"</td></tr>\",\n \"</table>\",\n \"</table>\",\n ]\n return \"\\n\".join(allt)", "def _formatBody(self, body_contents):\n body_contents = str(body_contents)\n\n # Replace github image tag ![alt](src) with plain url\n p = re.compile('!\\[.*?\\]\\((.*?)\\)')\n body_contents = p.sub('\\g<1>', body_contents)\n\n # Replace github image tag <img> with plain url\n p = re.compile('<img.*src=\"(.*?)\".*>')\n body_contents = p.sub('\\g<1>', body_contents)\n\n # Replace ``` with [code] tag\n p = re.compile('```(.*?)(```|$)', re.DOTALL)\n body_contents = p.sub('[code]\\g<1>[/code]', body_contents)\n\n return self._cutBody(body_contents)", "def get_content(html_soup):\n text_above_image = html_soup.findAll('div', attrs = {\"class\" : \"rs-content abstract\"})\n if len(text_above_image) > 1:\n text_above_image = text_above_image[1].get_text()\n else:\n text_above_image = '' \n text_below_image = html_soup.find('div', attrs = {\"class\" : \"body\"}).get_text()\n content = text_above_image + text_below_image\n \n return content", "def get_images(self, page_number):", "def add_to_fb2(self, news: dict) -> str:\n\n add_to_fb2_file = \"\"\n add_to_fb2_file += f\" <p>Title: {news['Title']}</p>\\n\"\n add_to_fb2_file += f\" <p><a l:href='{news['Link']}'> 'Link to news' </a></p>\\n\"\n add_to_fb2_file += f\" <p>PubDate: {news['PubDate']}</p>\\n\"\n add_to_fb2_file += f\" <p>Source: {news['Source']}</p>\\n\"\n\n if news['ImageCacheName']:\n add_to_fb2_file += f\" <p><image l:href='#{news['ImageCacheName']}'/></p>\\n\"\n with open(f\"{self.full_path_to_image_cache}{os.sep}{news['ImageCacheName']}\", \"rb\") as img_file:\n b64_string = base64.b64encode(img_file.read())\n self.binaries += f\"<binary id='{news['ImageCacheName']}' \" \\\n f\"content-type='image/jpeg'>{b64_string.decode('utf-8')}</binary>\\n\"\n\n add_to_fb2_file += f\" <empty-line/>\\n\"\n\n return add_to_fb2_file", "def return_image(val, model_id, message_name, field_name, mime, sind):\n column_data_source = curdoc().get_model_by_name(sind)\n index = column_data_source.tags[0]\n url = \"http://{0}/image/\".format(_host) + \"---\".join([model_id, message_name, field_name, mime, sind, str(index)])\n return url", "def generate_file_link_html_from_url(self, s3_public_url, file_name):\r\n image_link = \"\"\"\r\n <a href=\"{0}\" target=\"_blank\">{1}</a>\r\n \"\"\".format(s3_public_url, file_name)\r\n return image_link", "def render_social_media_metadata(self):\n sm_metadata_html = SM_METADATA_TEMPLATE.format(\n self.view.permalink,\n self.view.img_urls[1],\n 
self.view.img_urls[0],)\n return sm_metadata_html", "def create_base_image(self, builder, template, parameters):", "async def getImageURLS(self, tags, fuzzy=False, singlePage=False):\n if fuzzy:\n tags = tags.split(\" \")\n for tag in tags:\n tag = tag + \"~\"\n temp = \" \"\n tags = temp.join(tags)\n print(tags)\n num = await self.totalImages(tags)\n if num != 0:\n PID = 0\n imgList = []\n XML = None\n t = True\n tempURL = self.urlGen(tags=tags, PID=PID)\n while t:\n with async_timeout.timeout(10):\n async with self.session.get(url=tempURL) as XML:\n XML = await XML.read()\n XML = ET.XML(XML)\n XML = self.ParseXML(XML)\n if XML is None:\n return None\n if len(imgList) >= int(XML['posts']['@count']): # \"if we're out of images to process\"\n t = False # \"end the loop\"\n else:\n for data in XML['posts']['post']:\n imgList.append(str(data['@file_url']))\n if singlePage:\n return imgList\n PID += 1\n return imgList\n else:\n return None", "def embed_image_html(image, type):\n if type == 'dehaze':\n image_pil = Image.fromarray((image).astype('uint8'))\n elif type == 'style_transfer':\n image_pil = Image.fromarray((image).astype('uint8'))\n else:\n image_pil = Image.fromarray((255 * image).astype('uint8'))\n if sys.version_info.major == 2:\n string_buf=StringIO.StringIO()\n image_pil.save(string_buf, format='png')\n data = string_buf.getvalue().encode('base64').replace('\\n', '')\n else:\n _buf = BytesIO()\n image_pil.save(_buf, format='png')\n _buf.seek(0)\n b64_buf = base64.b64encode(_buf.getvalue())\n string_buf = StringIO(b64_buf.decode('utf-8', errors='replace'))\n data =string_buf.getvalue().replace('\\n', '')\n\n return 'data:image/png;base64,' + data", "def url_for(**options):\n\n url_parts = get_url_parts(**options)\n image_hash = hashlib.md5(b(options[\"image_url\"])).hexdigest()\n url_parts.append(image_hash)\n\n return \"/\".join(url_parts)", "def crawling_images_url(self, data_dict, output_choice, commentors = False, tagged = True):\n\n content = []\n usernamelist = []\n\n for key in data_dict:\n data = self.json_url(data_dict[key]['url'])\n tempdict = data['entry_data']['PostPage'][0]['graphql']['shortcode_media']\n \n if commentors:\n for comment in tempdict['edge_media_to_comment']['edges']:\n newuser = comment['node']['owner']['username']\n if newuser not in usernamelist and newuser != self.username:\n usernamelist.append(newuser)\n data1 = self.rootuser_info(self.userpage_scraper(newuser))\n if output_choice[0]:\n print(data1)\n elif output_choice[1]:\n content.append(data1)\n\n if tagged:\n for tag in tempdict['edge_media_to_tagged_user']['edges']:\n newuser = tag['node']['user']['username']\n if newuser not in usernamelist and newuser != self.username:\n usernamelist.append(newuser)\n data1 = self.rootuser_info(self.userpage_scraper(newuser))\n if output_choice[0]:\n print(data1)\n elif output_choice[1]:\n content.append(data1)\n\n if output_choice[1]:\n self.pretty_print({'content': content}, False)", "def html_factory(article, html_file):\n with html_file:\n tags.h1(article.title)\n tags.p(tags.b('Title: '), article.title)\n tags.p(tags.b('Link: ', tags.a(tags.b(article.link), href=article.link, )))\n tags.p(tags.b('Date: '), article.date.strftime(\"%a, %d %B, %Y\"))\n tags.p(tags.b('Source: '), article.source)\n tags.p(tags.b('Description: '), article.description)\n if article.image != '---':\n tags.p(tags.img(style=\"width:360px\", src=article.image))\n else:\n tags.p(tags.b('Sorry, no images for this article'))\n return html_file", "async def _misc_IMGplumbob(self, 
ctx):\r\n await self.bot.say('{}, http://i.imgur.com/q8xJsJQ.gif'.format(ctx.message.author.mention))", "def breadcrumb_subscriber( event ):\n img = '<img src=\"'+event.request.static_url(\"leirirekkari:static/img/icons/breadcrumb_home.png\") + '\" />'\n event.request.bread = [{'url':'/', 'text':img}]", "def html_img_tags(self):\n return self.findall_markdown_cells(r'<img[^>]*>')", "def PLACEHOLDER(width=100, height=100, HTTP=\"\", seperator='/'):\n return f\"{HTTP}://{CDN_IMG.PLACEHOLDER_SERVICE}/{width}{seperator}{height}\"", "def ShowHTML(pTitle, href):\n\n oc = ObjectContainer(title2=pTitle)\n\n href = href if href else ''\n html = HTML.ElementFromURL(BASE_URL + href)\n\n if '/pornstars-click/' in href:\n href = '/profiles/' + href.rsplit('/', 1)[1]\n url = BASE_URL + href\n\n xvideosBest = \"thumb-block \"\n if (len(html.xpath('//div[@class=\"thumbBlock\"]')) > 0):\n xvideosBest = \"thumbBlock\"\n\n if (len(html.xpath('//title//text()')) > 0):\n if 'Pornstar page' in html.xpath('//title//text()')[0]:\n url = url + '/pornstar_videos/0/0'\n html = HTML.ElementFromURL(url)\n elif 'Channel page' in html.xpath('//title//text()')[0]:\n url = url + '/uploads/0/0'\n html = HTML.ElementFromURL(url)\n\n for video in html.xpath('//div[@class=\"%s\"]' %xvideosBest):\n try:\n if '/profiles/' not in url and '/pornstars-click' not in url:\n if (len(video.xpath('./div/div/a//@href')) == 0):\n oc.add(VideoClipObject(\n url=BASE_URL + video.xpath('./p/a//@href')[0],\n title=video.xpath('./p/a//text()')[0],\n thumb=THUMB_REG.search(video.xpath('./div/div/script//text()')[0]).group(1)\n ))\n else:\n vhref = video.xpath('./p/a//@href')[0]\n vtitle = video.xpath('./p/a//text()')[0]\n oc.add(DirectoryObject(\n key=Callback(ShowHTML, href=vhref, pTitle=vtitle),\n title=vtitle, thumb=THUMB_REG.search(video.xpath('./div/div/a/script//text()')[0]).group(1)\n ))\n else:\n oc.add(VideoClipObject(\n url=BASE_URL + video.xpath('./div/p/a//@href')[0],\n title=video.xpath('./div/p/a//text()')[0],\n thumb=video.xpath('./div/div/a/img//@src')[0]\n ))\n except:\n Log.Warn('nothing')\n\n # setup nextURL\n try:\n nextURL = None\n if html.xpath('//li/a[@data-page][text()=\"Next\"]'):\n next_page = int(html.xpath('//li/a[text()=\"Next\"]/@data-page')[0])\n nextURL = '/{}/{}'.format(url.split('/', 3)[3].rsplit('/', 1)[0], next_page)\n elif html.xpath('//li/a[@class=\"no-page\"][text()=\"Next\"]'):\n nextURL = html.xpath('//li/a[@class=\"no-page\"][text()=\"Next\"]/@href')[0]\n elif html.xpath('//div[contains(@class,\"pagination\")]//a[@class=\"active\"]/../following-sibling::li/a/@href'):\n nextURL = html.xpath(\"//div[contains(@class,'pagination')]/ul/li/a[@class='active']/../following-sibling::li/a/@href\")[0]\n\n if nextURL:\n next_page_num = nextURL.split('=')[-1] if '&' in nextURL else nextURL.split('/')[-1]\n next_page_num = next_page_num if next_page_num else nextURL.split('/')[-2]\n #Log(u\"next page number = '{}'\".format(next_page_num))\n oc.add(NextPageObject(\n key=Callback(ShowHTML, href=nextURL, pTitle='Page ' + next_page_num),\n title=\"More ...\"))\n except:\n Log.Exception(\"Cannot find next page\")\n # it will loop through and return the values for all items in the page\n return oc", "def _get_image_url_in_content(self, content):\n begin_token = 'src=\"'\n begin = content.find(begin_token)\n if begin == -1:\n return None\n\n # Acrescentamos o tamanho do 'begin_token' no 'begin'\n begin += len(begin_token)\n end = content.find('\"', begin)\n url = content[begin:end]\n return url.split('?')[0]", "async def 
bImage(self, ctx, query, num=1):\r\n\r\n webpage = \"http://www.bing.com/images/search?q=\" + query.replace(\" \", \"+\") + \"&view=detailv2&adlt=off&selectedIndex=0\"\r\n\r\n html_content = urllib.request.urlopen(webpage)\r\n str_html = html_content.read().decode(\"utf-8\")\r\n match = re.findall(r'src=\"http://?([^\\'\" >]+)', str_html)\r\n if match:\r\n try:\r\n await ctx.send(\"http://\" + match[num-1])\r\n except (Exception):\r\n await ctx.send(\"```No \" + str(num) + \"th Result```\")\r\n else:\r\n await ctx.send(\"```No Image Found```\")", "def embed_image_html(imgBGR, target_width=TARGET_WIDTH, target_height=TARGET_HEIGHT):\n import cv2\n from PIL import Image\n if target_width is not None:\n imgBGR = _resize(imgBGR, t_width=target_width)\n elif target_width is not None:\n imgBGR = _resize(imgBGR, t_height=target_height)\n imgRGB = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2RGB)\n pil_img = Image.fromarray(imgRGB)\n string_buf = StringIO()\n pil_img.save(string_buf, format='jpeg')\n data = string_buf.getvalue().encode('base64').replace('\\n', '')\n return 'data:image/jpeg;base64,' + data", "def user_images_url(self, datadict):\n\n dict1 = datadict['entry_data']['ProfilePage'][0]['graphql']['user']['edge_owner_to_timeline_media']\n no_of_posts = dict1['count']\n\n posts = dict1['edges']\n\n posts_info = {}\n for count, post in enumerate(posts):\n tempdict = {}\n\n tempdict['url'] = \"https://www.instagram.com/p/\" + post['node']['shortcode']\n tempdict['is_video'] = post['node']['is_video']\n tempdict['caption'] = post['node']['edge_media_to_caption']['edges'][0]['node']\n\n\n posts_info[count] = tempdict\n\n return (posts_info)", "def _format_image(image, tags):\n text = \", \".join(md.pre(tag) for tag in tags)\n dest = _to_dockerfile_url(image)\n return md.item(md.link(text, dest))", "def images(name):\n return static_file(name, root=os.path.join(BASEDIR, \"images\"))", "def get_image_link(self):\n table = self.soup.find('table')\n image_tag = table.find('img')\n image_name = self.soup.find_all(\"b\")[1].text\n return image_tag['src'], image_name\n\n # image = td.find_all('img')\n # print(image)\n # if image is not None:\n # return urljoin(self.base_url, image['src'])", "def img_alt_src(html_source):\n # =============================================================================\n # Beautiful soup\n # =============================================================================\n \n bs = BeautifulSoup(html_source, 'html.parser')\n \n #Getting all Alt text from HTML \n alt_txt = [] \n img_url = []\n for img in bs.find_all('img', alt=True):\n try:\n print(img['alt'])\n print(img['src'])\n a = img['alt']\n s = img['src']\n alt_txt.append(a)\n img_url.append(s)\n except:\n print('pass')\n \n \n cleaning(alt_txt, img_url)", "def getBody(HTMLstring, png_list):\n\n # Next, we generate all the rows but the last one...\n while len(png_list) > 4:\n HTMLstring += '<div class=\"row\">'\n for i in range(4):\n HTMLstring += ('''<div class=\"col-xs-3 imgbox\">\n <img class=\"img-responsive\" src=\"''' \n + png_list[i] + '''\" /><h5 class=\"center\">''' + png_list[i]\n + \"</h5></div>\")\n HTMLstring += \"</div>\"\n png_list = png_list[4:]\n \n # We obtain the last row by popping what remains.\n HTMLstring += '<div class=\"row\">'\n while len(png_list) > 0:\n png_file = png_list.pop(0)\n HTMLstring +=('''<div class=\"col-xs-3 imgbox\">\n <img class=\"img-responsive\" src=\"''' \n + png_file + '''\" /><h5 class=\"center\">''' + png_file\n + \"</h5></div>\")\n HTMLstring += \"</div>\"\n return 
HTMLstring", "def image(name, value, width=\"\", height=\"\", alt=None):\n if alt is None:\n alt = name\n log.debug(u\"image %s\" % value)\n html = u\"<img id=\\\"%s\\\" \" % name\n html += u'alt=\"%s\" ' % alt\n if width:\n html += u\"width=\\\"%s\\\" \" % width\n if height:\n html += u\"height=\\\"%s\\\" \" % height\n html += u\"src=\\\"%s\\\" \" % value\n html += u\"/>\\n\"\n return html", "def brief_json(self, absolutize_url):\n template = {}\n template.update({\n \"id\": self.image_id,\n \"links\": self.links_json(absolutize_url),\n \"name\": self.name\n })\n return template", "def __urlImageGenerator(cls, link):\n\n try:\n a = Article(url=link)\n a.download()\n a.parse()\n a.fetch_images()\n\n for img in a.imgs:\n yield img\n except Exception:\n pass", "def content(self, datas):\n subject = \"TCC - MLaaS Topzero D+\"\n\n body = f\"\"\"\n <h1>TCC MLaaS</h1>\n <p>Realizamos o treinamento da base de dados que nos foi enviada. Abaixo você pode ver nossa o resultado dos treinos realizados.</p>\n \"\"\"\n\n for data in datas:\n if data is not None:\n for key, info in data['images'].items():\n body += f\"\"\"{info[0]}\"\"\"\n if info[1] is not None:\n body += f\"\"\"<br><img src=\"cid:{info[1]}\" alt=\"{key}\" height=\"300\" width=\"300\"><br>\"\"\"\n\n body += f\"\"\"\n </br>\n Atenciosamente\n <h4><b>IESB</b></h4>\n <h5>created by <b>Allan Kleitson Teotonio</b></h5>\n <h5>Science Computer. 2019</h5>\n \"\"\"\n\n return subject, body", "def tag_and_embed_image_urls(self, image_urls, model=None):\n return self._multi_imageurl_op(image_urls, ['tag','embed'], model=model)", "def img_render(kve, lopt_str, sopt_str, gopt_str, popt_str, glopt_str, img_path):\n i_before = ''\n i_layer = ''\n i_after = ''\n i_label_str_html = ''\n if 'img' in kve:\n img_paths = [x.strip() for x in kve['img'].split(':')]\n for opt_str in glopt_str, popt_str, gopt_str, sopt_str, lopt_str:\n if 'autoilabel' in opt_str:\n i_label_str = os.path.splitext(os.path.basename(img_paths[0]))[0]\n i_label_str_html = ' <div class=\"label bottom\">' \\\n + i_label_str + '</div>'\n if 'ilabel' in kve:\n i_label_str = kve['ilabel']\n i_label_str_html = ' <div class=\"label bottom\">' \\\n + i_label_str + '</div>'\n img_tag_str = ''\n for idx, path in enumerate(img_paths):\n img_tag_str = img_tag_str + '<img src=\"' + img_path + img_paths[idx] + '\"/>'\n for opt_str in [glopt_str, popt_str, gopt_str, sopt_str, lopt_str]:\n if 'ibefore' in opt_str:\n i_before = ' <div class=\"layout ' + lopt_str \\\n + '\"><div class=\"img\">' + img_tag_str + '</div>' \\\n + i_label_str_html + '</div>'\n if 'iafter' in opt_str:\n i_after = ' <div class=\"layout ' + lopt_str \\\n + '\"><div class=\"img\">' + img_tag_str + '</div>' \\\n + i_label_str_html + '</div>'\n if not (i_before or i_after):\n i_layer = ' <div class=\"img\">' + img_tag_str + '</div>'\n return i_before, i_layer, i_after\n return '', '', ''", "def _collect_img_links(self):\n raise NotImplementedError", "def get_image_comic_url(session, response):\n soup = bs(response.text, 'lxml')\n for div in soup.find_all('div', class_=\"img-comic-container\"):\n for a in div.find_all('a', class_=\"img-comic-link\"):\n for img in a.find_all('img', src=True):\n return \"https:\" + img['src']" ]
[ "0.65580297", "0.6551254", "0.65510756", "0.6550227", "0.6437125", "0.6404325", "0.6345454", "0.63356936", "0.6310244", "0.6258961", "0.6221798", "0.6157185", "0.6137797", "0.60765594", "0.606654", "0.6046435", "0.60122216", "0.6008134", "0.60027015", "0.60017127", "0.5978731", "0.5950172", "0.5888022", "0.5865719", "0.5858332", "0.58505434", "0.5821355", "0.58195585", "0.5818279", "0.58034253", "0.57795256", "0.57734686", "0.5757102", "0.57512516", "0.57496184", "0.5741821", "0.5740479", "0.57259667", "0.5722323", "0.57159287", "0.56949216", "0.5694811", "0.56867915", "0.56866217", "0.5680487", "0.566889", "0.5655942", "0.5643483", "0.5624365", "0.5622925", "0.5616132", "0.5605249", "0.56015253", "0.5599998", "0.5590731", "0.5572891", "0.5561459", "0.5551721", "0.55492884", "0.55375", "0.5528547", "0.5526228", "0.552043", "0.5512654", "0.55079013", "0.55065924", "0.5494266", "0.5493481", "0.5486345", "0.54846513", "0.5483397", "0.5479986", "0.5475414", "0.54719317", "0.5465602", "0.54590756", "0.5446466", "0.5443482", "0.5434954", "0.5432839", "0.54313785", "0.5427405", "0.54251987", "0.54187685", "0.54135305", "0.54127693", "0.54092354", "0.5400677", "0.5400417", "0.53947973", "0.5386845", "0.5385445", "0.5382407", "0.5380116", "0.53783494", "0.53768843", "0.5373979", "0.53649503", "0.5360862", "0.53580785" ]
0.74483454
0
Export blogger html to clipboard. If full, export the complete html; otherwise export an html extract ready to paste into blogger edit mode.
def prepare_for_blogger(args): title, posts = parse_markdown(os.path.join(args.root, 'index.md')) online_images, online_videos = online_images_url(args) if args.check_images and check_images(args, posts, online_images) is False: pass html = compose_blogger_html(args, title, posts, online_images, online_videos) if args.full is False: html = re.search('<body>(.*)?</body>', html, flags=re.DOTALL).group(1) html = re.sub('<script>.*?</script>', '', html, flags=re.DOTALL) html = STYLE.replace('%%', '%') + html if args.dest: with open(args.dest, 'wt', encoding='utf-8') as f: f.write(html) else: clipboard.copy(html)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_backup(filename, html):\n\n with open(filename, 'wb') as f:\n f.write(html)", "def write_to_paste_buffer(txt):\n pyperclip.copy(txt)", "def clipboard(self, text = None):\n if text == None:\n response = self._fetch_json('/api/clipboard')\n return response['content']\n else:\n postdata = codecs.encode(json.dumps({ 'content': text }), 'utf-8')\n self._urlopen('/api/clipboard', postdata).read()", "def _get_paste_page_content(self, url):\n paste_raw_url = self._get_post_url(url)\n paste_content = self._make_request(paste_raw_url, to_json=False)\n return paste_content.text", "def publish_html(self, readyhtml):\n with open(self.outfile,'w') as f_out:\n f_out.writelines(readyhtml)", "def copy(to_end=False):\n # Find a way to generalize this for different systems\n if to_end:\n with open('/Users/john/Terminal Saved Output', 'r') as f:\n output = f.read().replace('bpython', 'Python')\n code = output.split('\\nPython')[-1]\n else:\n code = pyperclip.paste()\n pyperclip.copy(parse_code(code))\n return None", "def save_clipboard(dist):\n tmpimg = ImageGrab.grabclipboard()\n if tmpimg:\n tmpimg.save(dist, 'PNG', compress_level=9)\n return dist\n return ''", "def do_copy_button( self, event ):\n #rint( \" do_copy_button -- this is all \")\n data = self.msg_text.get( 1.0, Tk.END )\n pyperclip.copy( data )", "def run_paste(self, expanded, unexpanded) :\n\t\tif expanded :\n\t\t\treturn self.errormessage(\"Doesn't need any argument\")\n\t\tif not self.HasPerms(self.__context, 'View management screens') :\n\t\t\treturn -1\n\t\tif not hasattr(self, '_clipboard') :\n\t\t\treturn self.errormessage(\"Clipboard is empty\")\n\t\ttry :\n\t\t\tself.__context.manage_pasteObjects(cb_copy_data = self._clipboard)\n\t\t\tself.htmlmessage(\"Clipboard's content pasted into %s\" % self.getcwd())\n\t\texcept CopyError :\n\t\t\treturn self.errormessage(\"Impossible to paste clipboard's content into %s\" % self.getcwd())", "def scrape_paste(self,paste_id):\n parameter = {'i': paste_id}\n r = requests.get('https://scrape.pastebin.com/api_scrape_item.php',params=parameter)\n return r.text", "def exportHtmlFile(self):\n\n fileName = QtGui.QFileDialog.getSaveFileName(None,\"Save html file\", os.getenv('HOME'))\n if fileName:\n fileName += \".html\"\n #print ((\"Exporting: to \" + fileName))\n filedata = \"<html>\\n<head>\\n<title>\" + self.settings['projectName'] + \"</title>\\n</head>\\n<body>\\n\"\n #filedata += str(self.htmlResults.encode('utf-8'))\n modData = \"\"\n for c in self.htmlResults:\n if ord(c) < 128:\n modData += c\n else:\n modData += \"&#\" + str(ord(c)) + \";\"\n filedata += modData\n filedata += \"</body>\\n</html>\"\n f = open(fileName, 'w')\n f.write(filedata)\n f.close()\n self.log += \"Search Results exported to \" + fileName + \"\\n\"\n QtGui.QMessageBox.information(None, \"Html file Export\", str(fileName) + \" exported\")", "def wonder():\n copy()\n get_soup()\n get_text()\n change_write_text()\n Check_status_time_stamp()", "def clip_copy(num):\n if g.browse_mode == \"ytpl\":\n\n p = g.ytpls[int(num) - 1]\n link = \"https://youtube.com/playlist?list=%s\" % p['link']\n\n elif g.browse_mode == \"normal\":\n item = (g.model.songs[int(num) - 1])\n link = \"https://youtube.com/watch?v=%s\" % item.ytid\n\n else:\n g.message = \"clipboard copy not valid in this mode\"\n g.content = generate_songlist_display()\n return\n\n if has_pyperclip:\n\n try:\n pyperclip.copy(link)\n g.message = c.y + link + c.w + \" copied\"\n g.content = generate_songlist_display()\n\n except Exception as e:\n xprint(link)\n 
xprint(\"Error - couldn't copy to clipboard.\")\n xprint(e.__doc__)\n xprint(\"\")\n input(\"Press Enter to continue.\")\n g.content = generate_songlist_display()\n\n else:\n g.message = \"pyperclip module must be installed for clipboard support\\n\"\n g.message += \"see https://pypi.python.org/pypi/pyperclip/\"\n g.content = generate_songlist_display()", "def clip(save, edit, browser, overwrite):\n html = clipboard.get_clipboard_html()\n if html is None:\n click.echo('No html in the clipboard')\n return\n\n if save is None:\n content = html2md.html_to_markdown(html).strip()\n click.echo(content)\n return\n\n if not save.endswith('.md'):\n click.echo('Note must have extension \".md\"')\n return\n\n note = Note(save)\n if os.path.exists(note.path.abs) and not overwrite:\n click.echo('Note already exists at \"{}\" (specify `--overwrite` to overwrite)'.format(note.path.abs))\n return\n\n html = parsers.rewrite_external_images(html, note)\n content = html2md.html_to_markdown(html).strip()\n note.write(content)\n\n if browser:\n click.launch('http://localhost:{0}/{1}'.format(conf.PORT, note.path.rel))\n\n if edit:\n click.edit(filename=note.path.abs)", "def direct_save():\n c = ClipboardMemo()\n c.save()", "def copy_to_clipboard(input):\n #\n # Define Tk Window and Prevent from Showing\n #\n root = tk.Tk()\n root.withdraw()\n #\n # Clear Clipboard and Append Text\n #\n root.clipboard_clear()\n root.clipboard_append(input)", "def exportBookmarksHtml(self, filePath=''):\n if not filePath:\n filePath = self.getFileName(_('TreeLine - Export HTML Bookmarks'),\n 'html')\n if not filePath:\n return False\n QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)\n if ExportDialog.exportWhat == ExportDialog.entireTree:\n self.selectedNodes = [self.rootNode]\n addBranches = ExportDialog.exportWhat != ExportDialog.selectNode\n title = _bookmarkTitle\n if len(self.selectedNodes) == 1 and addBranches:\n title = self.selectedNodes[0].title()\n self.selectedNodes = self.selectedNodes[0].childList\n lines = ['<!DOCTYPE NETSCAPE-Bookmark-file-1>',\n '<meta http-equiv=\"Content-Type\" content=\"text/html; '\n 'charset=utf-8\">', '<title>{0}</title>'.format(title),\n '<h1>{0}</h1>'.format(title)]\n for node in self.selectedNodes:\n lines.extend(node.exportHtmlBookmarks(addBranches))\n with open(filePath, 'w', encoding='utf-8') as f:\n f.writelines([(line + '\\n') for line in lines])\n return True", "def copyToClipboard(copy_str):\n\tcopier = Tk()\n\t# keep the window from showing\n\tcopier.withdraw()\n\tcopier.clipboard_clear()\n\t# text saved to clipboard\n\tcopier.clipboard_append(copy_str)\n\tcopier.destroy()", "def from_clipboard(self):\n for url in QApplication.clipboard().mimeData().urls():\n src = url.path()\n dst = os.path.join(self.current_location(), os.path.basename(src))\n try:\n if os.path.islink(src) or os.path.isfile(src):\n copyfile(src, dst, overwrite=False)\n elif os.path.isdir(src):\n copytree(src, dst, overwrite=False)\n except:\n QMessageBox.critical(self, 'Error copying file/dir', traceback.format_exc())", "def _on_articles_copy_link(self, evt=None):\n \n # get selected articles\n articles = self._articles_view.GetSelectedArticles()\n if not articles:\n return\n \n # format links\n text = \"\"\n for article in articles:\n if article.doi:\n text += \"http://dx.doi.org/%s\\n\" % article.doi\n \n # make text object for data\n obj = wx.TextDataObject()\n obj.SetText(text.strip())\n \n # paste to clipboard\n if wx.TheClipboard.Open():\n wx.TheClipboard.SetData(obj)\n wx.TheClipboard.Close()", 
"async def paste(text: str) -> str:\n\n async with aiohttp.ClientSession() as aioclient:\n post = await aioclient.post(\"https://hastebin.com/documents\", data=text)\n if post.status == 200:\n response = await post.text()\n return f\"https://hastebin.com/{response[8:-2]}\"\n\n # Fallback bin\n post = await aioclient.post(\"https://bin.drlazor.be\", data={\"val\": text})\n if post.status == 200:\n return post.url", "def convert_html():\n return", "def exportHtmlSingle(self, filePath=''):\n if not filePath:\n filePath = self.getFileName(_('TreeLine - Export HTML'), 'html')\n if not filePath:\n return False\n QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)\n if ExportDialog.exportWhat == ExportDialog.entireTree:\n self.selectedNodes = [self.rootNode]\n outputGroup = treeoutput.OutputGroup(self.selectedNodes,\n ExportDialog.includeRoot,\n ExportDialog.exportWhat !=\n ExportDialog.selectNode,\n ExportDialog.openOnly, True)\n outputGroup.addBlanksBetween()\n outputGroup.addIndents()\n outputGroup.addSiblingPrefixes()\n outGroups = outputGroup.splitColumns(ExportDialog.numColumns)\n htmlTitle = os.path.splitext(os.path.basename(filePath))[0]\n indent = globalref.genOptions.getValue('IndentOffset')\n lines = ['<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01 '\n 'Transitional//EN\">', '<html>', '<head>',\n '<meta http-equiv=\"Content-Type\" content=\"text/html; '\n 'charset=utf-8\">', '<title>{0}</title>'.format(htmlTitle),\n '<style type=\"text/css\"><!--',\n 'div {{margin-left: {0}em}}'.format(indent),\n 'td {padding: 10px}', 'tr {vertical-align: top}',\n '--></style>', '</head>', '<body>']\n if ExportDialog.addHeader:\n headerText = (globalref.mainControl.activeControl.printData.\n formatHeaderFooter(True))\n if headerText:\n lines.append(headerText)\n lines.extend(['<table>', '<tr><td>'])\n lines.extend(outGroups[0].getLines())\n for group in outGroups[1:]:\n lines.append('</td><td>')\n lines.extend(group.getLines())\n lines.extend(['</td></tr>', '</table>'])\n if ExportDialog.addHeader:\n footerText = (globalref.mainControl.activeControl.printData.\n formatHeaderFooter(False))\n if footerText:\n lines.append(footerText)\n lines.extend(['</body>', '</html>'])\n with open(filePath, 'w', encoding='utf-8') as f:\n f.writelines([(line + '\\n') for line in lines])\n return True", "def write_html(self, filename):\n # todo: allow writing in split mode\n html = self.to_html()\n open(filename, 'wt').write(html)\n print('Exported app to %r' % filename)", "def print_contents(browser, dest='~/.browser.html'):\n import os\n open(os.path.expanduser(dest), 'w').write(browser.contents)", "def _get_pastes_page_content(self):\n url = self._get_post_list_url()\n content = self._make_request(url, to_json=False)\n return content.text", "def save(self, filename):\n outfile = open(filename, \"w\")\n outfile.write(self.html.encode('utf8'))\n outfile.close()", "def paste(self, text):\n if self.file is None:\n return self.paste_to_stdout(text)\n return self.paste_to_file(text)", "def __editPaste(self):\n if QApplication.focusWidget() == e5App().getObject(\"Shell\"):\n e5App().getObject(\"Shell\").paste()\n else:\n self.activeWindow().paste()", "def write(self,out):\n with open( out, \"wb\") as fi:\n fi.write(html.tostring(self.book))", "def __copyToClipboard(self):\n itm = self.findList.selectedItems()[0]\n if itm.parent():\n fn = itm.parent().text(0)\n else:\n fn = itm.text(0)\n \n cb = QApplication.clipboard()\n cb.setText(fn)", "async def pob(self, ctx):\n if str(ctx.guild.id) in ctx.bot.server_config.conf 
and ctx.bot.server_config.conf[str(ctx.guild.id)]['disable_pastebin']:\n return\n paste_keys = pastebin.fetch_paste_key(ctx.message.content)\n if not paste_keys: return\n xml = None\n paste_key = paste_keys[0]\n try:\n xml = await self.bot.loop.run_in_executor(None, pastebin.get_as_xml, paste_key)\n except:\n return\n if not xml: return\n stats = await self.bot.loop.run_in_executor(None, cache_pob_xml, xml, self.client)\n await self.make_responsive_embed(stats, ctx)", "def save_raw_html(r):\n raw_insert = {\"raw_html\": r}\n raw_html.insert_one(raw_insert)\n return None", "def outputHtml(s):\n htmlFile.write(s + \"\\n\")", "def offline_copy(_export_path):\n global export_path\n export_path = _export_path\n \n # First, monkey patch the original config\n main.NBCONFIG.protectect_dirs = []\n main.NBCONFIG.protected_users = {}\n main.NBCONFIG.edit_users = {}\n \n # Now monkey patch NBweb\n main.REQUIRELOGIN = False\n\n pages = []\n\n # Copy and work all source files \n for dirpath,dirnames,filenames in os.walk(NBCONFIG.source):\n for dirname in dirnames[:]: # Iterate a copy since we will delete in place\n if any(dirname.startswith(i) for i in ['.']):\n dirnames.remove(dirname) # So we do not parse it later\n continue\n if dirname == '_scratch':\n dirnames.remove(dirname) # So we do not parse it later\n continue\n \n # Names\n src_systemname = os.path.join(dirpath,dirname)\n rootname = os.path.relpath(src_systemname,NBCONFIG.source) # No leading / though\n dest_systemname = os.path.join(export_path,rootname)\n \n mkdir(rootname,isfile=False) # Will make the dir no matter what\n \n # Index\n dest = os.path.join(export_path,rootname, 'index.html')\n \n # Exclusions.\n if main.exclusion_check(utils.join('/',rootname +'/')):\n with open(dest,'w',encoding='utf8') as FF:\n FF.write('')\n continue\n \n try:\n html = main.main_route('/' + rootname + '/')\n except HTTPError:\n # Likely some additional resource in _NBweb\n try:\n os.rmdir(dest_systemname) # Should be empty\n except OSError:\n pass\n os.symlink(src_systemname,dest_systemname)\n continue\n \n \n html = process_page(html,dest)\n with open(dest,'w',encoding='utf8') as FF:\n FF.write(html)\n \n # _all\n dest = os.path.join(export_path,'_all',rootname, 'index.html')\n mkdir(dest,isfile=True,isfull=True)\n \n html = main.allpage('/'+ rootname +'/')\n html = process_page(html,dest)\n with open(dest,'w',encoding='utf8') as FF:\n FF.write(html)\n \n \n # Loop each file\n for filename in filenames:\n if os.path.splitext(filename)[0] == 'index':\n continue # Already made above\n \n # Names\n src_systemname = os.path.join(dirpath,filename)\n rootname = os.path.relpath(src_systemname,NBCONFIG.source) # No leading / though\n dest_systemname = os.path.join(export_path,rootname)\n \n mkdir(rootname,isfile=True) # Will make the dir no matter what\n try:\n os.symlink(src_systemname,dest_systemname) \n except OSError:\n os.remove(dest_systemname)\n os.symlink(src_systemname,dest_systemname)\n \n rootbasename,ext = os.path.splitext(rootname)\n if ext in NBCONFIG.extensions:\n dest = os.path.join(export_path,rootbasename + '.html')\n try:\n html = main.main_route(rootbasename + '.html')\n except:\n print('Issue with: {}'.format(rootname))\n \n html = process_page(html,dest)\n \n with open(dest,'w',encoding='utf8') as FF:\n FF.write(html)\n pages.append(rootbasename)\n\n ## Index pages\n # Home page w/o blog\n dest_systemname = os.path.join(export_path,'')\n dest = os.path.join(export_path,'index.html')\n \n html0 = main.main_route('/',map_view=True)\n \n html 
= process_page(html0,dest)\n with open(dest,'w',encoding='utf8') as FF:\n FF.write(html)\n\n # Also write the sitemap\n dest = os.path.join(export_path,'_sitemap/index.html')\n mkdir('/_sitemap',isfile=False)\n html = process_page(html0,dest)\n with open(dest,'w',encoding='utf8') as FF:\n FF.write(html)\n \n # _all\n dest = os.path.join(export_path,'_all','index.html')\n \n html = main.allpage('/')\n html = process_page(html,dest)\n with open(dest,'w',encoding='utf8') as FF:\n FF.write(html)\n \n ## Blog Pages\n if len(NBCONFIG.blog_dirs) > 0:\n blog_num = 0\n while True:\n dest = os.path.join(export_path,'_blog',unicode(blog_num),'index.html')\n \n try:\n html = main.main_route('/',map_view=False,blog_num=blog_num)\n except HTTPError:\n break # At the last one\n \n mkdir(dest,isfile=True,isfull=True) \n \n html = process_page(html,dest)\n with open(dest,'w',encoding='utf8') as FF:\n FF.write(html)\n blog_num += 1\n # Make the home page. \n dest = os.path.join(export_path,'index.html')\n html = main.main_route('/',map_view=False,blog_num=0)\n html = process_page(html,dest)\n with open(dest,'w',encoding='utf8') as FF:\n FF.write(html)\n \n ## Special Pages\n make_random_forward(pages)\n \n # Tags\n dest = os.path.join(export_path,'_tags/index.html')\n mkdir(dest,isfile=True,isfull=True) \n html = main.return_tags()\n html = process_page(html,dest)\n with open(dest,'w',encoding='utf8') as FF:\n FF.write(html)\n \n # ToDos\n dest = os.path.join(export_path,'_todo/index.html')\n mkdir(dest,isfile=True,isfull=True) \n html = main.return_todo()\n html = process_page(html,dest)\n with open(dest,'w',encoding='utf8') as FF:\n FF.write(html)\n \n txt = main.return_todo_txt()\n dest = os.path.join(export_path,'_todo/todo.txt')\n with open(dest,'w',encoding='utf8') as FF:\n FF.write(txt)\n \n # Galleries\n cpsym( utils.join(NBCONFIG.scratch_path,'_galleries'),utils.join(export_path,'_galleries'))\n \n ## Clean up\n for F in [utils.join(export_path,'_NBweb',a) for a in ['NBCONFIG.py','NBCONFIG.pyc','template.html']]:\n try:\n os.remove(F)\n except:\n pass\n \n # Make sure there are never any directory listings\n for dirpath,dirnames,filenames in os.walk(export_path):\n if 'index.html' not in filenames:\n with open(utils.join(dirpath,'index.html'),'w',encoding='utf8') as F:\n F.write('')", "def compose_blogger_html(args, title, posts, imgdata, online_videos):\n for post in posts:\n for media in post.medias:\n if type(media) is PostImage:\n if media.uri not in imgdata:\n print('Image missing: ', media.uri)\n else:\n img_url, resized_url = imgdata[media.uri]\n media.uri = img_url\n media.resized_url = resized_url\n elif type(media) is PostVideo:\n if not online_videos:\n print('Video missing: ', media.uri)\n else:\n media.iframe = online_videos[0]\n del online_videos[0]\n else:\n assert False\n\n return print_html(args, posts, title, '', target='blogger')", "def copy_to_clipboard(self, txt):\r\n cmd = 'echo \"' + txt.strip() + '\"|clip'\r\n return subprocess.check_call(cmd, shell=True)", "def output_html(self, path):\n if path is None:\n return\n import os\n fout = codecs.open(os.path.abspath(path), 'w', encoding='utf-8')\n fout.write('<html><body><table>')\n for data in self.datas:\n fout.write('<tr><td>%s</td><td>%s</td><td>%s</td></tr>' % (data['url'], data['title'], data['summary']))\n self.datas.remove(data)\n fout.write('</table></body></html>')\n fout.close()", "def copy_raw_bibtex(self, entry_idx=None):\n if entry_idx is None:\n entry_idx = self.selector.get_selection()\n else:\n entry_idx = 
self.selector.select_by_index(entry_idx)\n if not entry_idx:\n self.visual.error(\"Need a selection to show raw bibtex of\")\n return\n\n entries = self.get_current_entries()\n entries_string = Writer.entries_to_bibtex_string(entries)\n clipboard.copy(entries_string)\n self.visual.message(f\"Copied raw bibtex content of {len(entries)} entries.\")", "def htmlDocContentDumpOutput(self, buf, encoding):\n if buf is None: buf__o = None\n else: buf__o = buf._o\n libxml2mod.htmlDocContentDumpOutput(buf__o, self._o, encoding)", "def _on_articles_copy_summary(self, evt=None):\n \n # get selected articles\n articles = self._articles_view.GetSelectedArticles()\n if not articles:\n return\n \n # format citations\n text = \"\"\n for article in articles:\n text += article.format(\"[TI]\\n[AU]\\n[CI]\\n[DOIX]\\n\\n[AB]\\n\\n\")\n \n # make text object for data\n obj = wx.TextDataObject()\n obj.SetText(text.strip())\n \n # paste to clipboard\n if wx.TheClipboard.Open():\n wx.TheClipboard.SetData(obj)\n wx.TheClipboard.Close()", "def copy_link(self):\n try:\n Clipboard.copy(self.url)\n except:\n self.ids.link.text=self.link_message", "def clipboard_copy(text):\n result = subprocess.run(\n # \"primary\" because \"clipboard\" doesn't seem to work for all apps\n # you must paste with middle click\n [\"xclip\", \"-selection\", \"primary\", \"-l\", \"1\"],\n input=bytes(text, encoding=\"utf-8\")\n )\n if result.returncode == 0:\n pass\n else:\n print(\"Error copying\")", "def extract(url, output):\n click.echo(\"Start progress..\")\n news = url.context\n e = Extractor(news)\n click.echo(\"Extract information..\")\n try:\n news_text = e.get_news()\n except ExtractException as exception:\n # we aren't showing traceback to user, just echoing error message\n return click.echo(\"Page parsing error: %s\" % exception.args[0])\n\n with open(output, \"w\", encoding=\"utf-8\") as out_file:\n # print(news_text, file=out_file, flush=True)\n with click.progressbar(news_text, length=len(news_text.encode()),\n label=\"Writing to file\") as bar:\n for item in bar:\n print(item, end='', file=out_file, flush=True)\n\n click.echo(\"Finished!\")", "def test_copy_details(self):\n self.new_details.save_details()\n twitter = Details('Dennis', 'Facebook', 'Kiplangat', 'kiplangat18')\n twitter.save_details()\n find_details = None\n for details in Details.user_details_list:\n find_details = Details.find_by_site_name(details.site_name)\n return pyperclip.copy(find_details.password)\n\n Details.copy_details(self.new_details.site_name)\n self.assertEqual('kiplangat18', pyperclip.paste())\n print(pyperclip.paste())", "def copy_as_curl(self):\n curl_headers = ''\n for header, value in self.headers:\n curl_headers += \"-H '{}: {}' \".format(header, value)\n curl_data = ''\n if self.operation in ('POST', 'PUT'):\n curl_data = \"--data '{}' \".format(self.data)\n curl_cmd = \"curl '{}' {} {}--compressed\".format(self.url.url(), curl_headers, curl_data)\n QApplication.clipboard().setText(curl_cmd)", "def copy_to_clipboard(show_string, copy_string):\n copy_command = \"echo %s | pbcopy\" % (copy_string)\n print \"%s | bash='/bin/bash' param1='-c' param2='%s' terminal=false\" \\\n % (show_string, copy_command)", "def copy_link(self):\n try:\n Clipboard.copy(self.url)\n except(AttributeError):\n self.ids.label.text = self.error_msg", "def htmlDocContentDumpFormatOutput(self, buf, encoding, format):\n if buf is None: buf__o = None\n else: buf__o = buf._o\n libxml2mod.htmlDocContentDumpFormatOutput(buf__o, self._o, encoding, format)", "def 
export(self, package):\n self.style = package.style\n self.copyFiles(package)\n self.html = self.renderHeader(package.name)\n self.html += u\"<body>\\n\"\n self.html += u\"<div id=\\\"content\\\">\\n\"\n self.html += u\"<div id=\\\"header\\\">\\n\"\n self.html += escape(package.title)\n self.html += u\"</div>\\n\"\n self.html += u\"<div id=\\\"main\\\">\\n\"\n self.renderNode(package.root)\n self.html += u\"</div>\\n\"\n self.html += u\"</div>\\n\"\n self.html += u\"</body></html>\\n\"\n self.save(self.outputDir/\"index.html\")", "def process_page(html,dest):\n html0 = html[:]\n to_root = os.path.relpath(export_path,dest)\n to_root = to_root[1:]# Change '../' or '..' to '.' or './'\n \n # Fix links to directories first since that is easier to find\n html,N1 = re_dirlinks.subn(r'\\1=\"/\\2/index.html\"',html)\n \n # all pages links\n html,N2 = re_all.subn(r'\\1=\"/_all/\\2/index.html\"',html)\n \n # Add index.html for any other internal links. NOTE: by preprocessing\n # all internal links from the main content will already end in .html so this\n # is just special pages.\n for match in re_intlinks.finditer(html):\n dest = match.groups()[-1]\n ext = os.path.splitext(dest)[-1]\n if ext == '':\n old = r'{}=\"/{}\"'.format(*match.groups())\n new = r'{}=\"/{}\"'.format(match.groups()[0], os.path.join(match.groups()[1],'index.html') )\n html = html.replace(old,new)\n \n # Now make all links to the root\n html,N3 = re_intlinks.subn(r'\\1=\"{}/\\2\"'.format(to_root),html)\n \n # Remove the search stuff\n out = []\n ff = False\n for line in html.split('\\n'):\n if not ff and '<!-- search -->' not in line:\n out.append(line)\n continue\n \n if '<!-- search -->' in line:\n ff = True\n \n if ff and '<!-- /search -->' in line:\n ff = False\n\n html = '\\n'.join(out)\n return html", "def dump_html(self):\n l_html = self.m_driver.find_element_by_xpath('//html').get_attribute('outerHTML')\n with open(datetime.datetime.now().strftime('%Y%m%d_%H%M%S.html'), 'w') as f:\n f.write(l_html)", "def toClipboard(self):\n buf = io.BytesIO()\n self.fig.savefig(buf, dpi=300, facecolor='w', format='png',\n transparent=True)\n QtGui.QApplication.clipboard().setImage(\n QtGui.QImage.fromData(buf.getvalue()))\n buf.close()", "def clipboard(self, data):\n p = subprocess.Popen([\"xclip\", \"-selection\", \"clipboard\"], stdin=subprocess.PIPE)\n p.stdin.write(data.encode())\n p.stdin.close()", "def save(self):\n html_file = '{}/{}.html'.format(self.web_dir, self.title)\n f = open(html_file, 'wt')\n f.write(self.doc.render())\n f.close()", "def contentRaw(request):\n paste = Paste.get(request.matchdict['idContent'])\n # TODO type/mime\n return paste.content", "def copy(self):\r\n ret=' '\r\n if self.REQUEST.SESSION.has_key('my_path'):\r\n\t zpath=self.REQUEST.SESSION['my_path'].replace('toolbox_root','').strip('/')\r\n\t #ret=zpath\r\n\t if self.REQUEST.SESSION.has_key('copy_bild'):\r\n\t\t cp_bild=self.REQUEST.SESSION['copy_bild'].split('/')[-1].strip('/')\r\n\t\t cp_path=str('/').join(self.REQUEST.SESSION['copy_bild'].split('/')[0:-1])\r\n\t\t #ret+=' '+cp_path+' '+cp_bild\r\n\t\t if cp_path!=zpath:\r\n\t\t \tn_id=search_id(self,self.restrictedTraverse(zpath).objectValues('Image'))\r\n\t\t \t#ret+=' '+n_id\r\n\t\t\tfor x in liste_val:\r\n\t\t\t\ttry:\r\n\t\t\t\t\tfor obj in self.restrictedTraverse(cp_path).objectValues('Image'):\r\n\t\t\t\t\t if 
str(obj.getId())[0:6]==cp_bild:\r\n\t\t\t\t\t\tmy_clip=self.restrictedTraverse(cp_path).manage_copyObjects([obj.getId()])\r\n\t\t\t\t\t\tcopied=self.restrictedTraverse(zpath).manage_pasteObjects(my_clip)\r\n\t\t\t\t\t\t#ret+=' new id : '+str(copied[0]['new_id'])\r\n\t\t\t\t\t\t#if str(copied[0]['new_id']).split('_')[0]!=n_id:\r\n\t\t\t\t\t\t#\tself.restrictedTraverse(zpath).manage_renameObjects([str(copied[0]['new_id'])],[str(n_id+x)])\r\n\t\t\t\t\t\t\t#ret +=' False '\r\n\t\t\t\t\t\t#ret+='<br>\\n'\r\n\t\t\t\texcept:\r\n\t\t\t\t\tret+=''\r\n else:\r\n\t ret=' '\r\n return ' '", "def _pasteFile(self) -> None:\n if not self._fileClipboard:\n return\n cut = self._fileClipboard.pop()\n filenames = [x.name for x in self._fileClipboard]\n destPaths = [self._currPath.joinpath(x) for x in filenames]\n try:\n duplicates = []\n for src, dest in zip(self._fileClipboard, destPaths):\n if src == dest:\n raise shutil.SameFileError\n if dest in self._currPath.glob('*'):\n duplicates.append(dest)\n if duplicates:\n if self._overwriteFileMsgBox(duplicates) == QMessageBox.Cancel:\n self._fileClipboard.clear()\n self._pasteFileAction.setEnabled(False)\n return\n for src, dest in zip(self._fileClipboard, destPaths):\n if cut and src.is_file():\n shutil.move(str(src), str(dest))\n elif src.is_dir():\n dir_util.copy_tree(str(src), str(dest))\n if cut:\n shutil.rmtree(src)\n elif src.is_file():\n shutil.copy(str(src), str(dest))\n elif not src.exists():\n raise FileNotFoundError\n self._statusBar.showMessage('File pasted!', 3000)\n self._fileClipboard.clear()\n self._pasteFileAction.setEnabled(False)\n except shutil.SameFileError:\n self._statusBar.showMessage('You cannot overwrite the same file!', 3000)\n self._fileClipboard.clear()\n except PermissionError:\n self._statusBar.showMessage('No permission to copy the file!', 3000)\n self._fileClipboard.clear()\n except FileNotFoundError:\n self._statusBar.showMessage('Cannot find the source file!', 3000)\n self._fileClipboard.clear()\n finally:\n self._listDirectories()", "def get_paste_buffer():\n pb_str = pyperclip.paste()\n\n # If value returned from the clipboard is unicode and this is Python 2, convert to a \"normal\" Python 2 string first\n if six.PY2 and not isinstance(pb_str, str):\n import unicodedata\n pb_str = unicodedata.normalize('NFKD', pb_str).encode('ascii', 'ignore')\n\n return pb_str", "def set_clipboard(content, *args, **kwargs):\n G.DEVICE.set_clipboard(content, *args, **kwargs)", "def htmlDocContentDumpOutput(self, cur, encoding):\n if cur is None: cur__o = None\n else: cur__o = cur._o\n libxml2mod.htmlDocContentDumpOutput(self._o, cur__o, encoding)", "def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = '%02x' % random.getrandbits(256)\n\n lexer = get_lexer_by_name(self.language)\n options = self.title and {'title': self.title} or {}\n formatter = HtmlFormatter(style=self.style, linenos='table',\n full=True, **options)\n self.highlighted = highlight(self.code, lexer, formatter)\n super(Paste, self).save(*args, **kwargs)", "def copy(self, event):\n selection = self.get_selection()\n if not selection:\n return []\n start_row, start_col, end_row, end_col = selection\n data = u''\n rows = range(start_row, end_row + 1)\n for row in rows:\n columns = range(start_col, end_col + 1)\n for idx, column in enumerate(columns, 1):\n if idx == len(columns):\n # if we are at the last cell of the row, add new line instead\n data += self.GetCellValue(row, column) + \"\\n\"\n else:\n data += self.GetCellValue(row, column) + \"\\t\"\n 
text_data_object = wx.TextDataObject()\n text_data_object.SetText(data)\n if wx.TheClipboard.Open():\n wx.TheClipboard.SetData(text_data_object)\n wx.TheClipboard.Close()\n else:\n wx.MessageBox(\"Can't open the clipboard\", \"Warning\")", "def saveHtml(path: str, filename: str, html: str) -> None:\n filepath = os.path.join(path, filename)\n with open(filepath, \"w\") as fileHandle:\n fileHandle.write(html)\n return filepath", "def saveToFile(html):\n #print(\"Saving to file.\")\n html += \"\\n\"\n #open necessary files to save\n logFile = open(\"postLog_{0}_{1}.txt\".format(os.path.splitext(path)[0], dateTimeNow), \"a\")\n logFile.write(html)\n logFile.close()\n #print(\"Check Point.\")", "def paste(cmd=paste_cmd, stdout=PIPE):\n return Popen(cmd, stdout=stdout).communicate()[0].decode('utf-8')", "def save_page_as(browser, file_name):\n\n with open(file_name, \"w\") as fout:\n fout.write(browser.find_element_by_tag_name(\"pre\").text)", "def write_paste_log(url):\n with open('checked_pastes.txt', 'a') as paste_file:\n paste_file.writelines(url+\"\\n\")", "def process_webpage(self, target, output_file, url, embed, selenium):\n\t\t# Build the output file's name\n\t\tself._build_output_file(output_file)\n\t\t# Open the output file and clone the webpage\n\t\twith open(self.output_file_name, \"w\") as output:\n\t\t\tself.collect_source(target, output, url, embed, selenium)", "def download_html_command():\n # 1. Get input scan id from Demisto\n scanid = demisto.args().get('scanid')\n # 2. Get the forensic webpage HTML from SlashNext API\n response = download_html(scanid=scanid)\n if response.get('errorNo') != 0:\n return\n # 3. Parse and format the response\n html_base64 = response.get('htmlData').get('htmlBase64')\n html_data = base64.b64decode(html_base64)\n\n html_file = fileResult('slashnext_{}.html'.format(scanid), html_data, entryTypes['file'])\n\n demisto.results({\n 'Type': entryTypes['file'],\n 'ContentsFormat': formats['text'],\n 'Contents': 'Forensics: Webpage HTML for URL Scan ID = {}'.format(scanid),\n 'File': html_file.get('File'),\n 'FileID': html_file.get('FileID')\n })", "def save_current_nb_as_html(info=False):\n assert in_ipynb()\n\n full_path = get_notebook_name()\n path, filename = os.path.split(full_path)\n\n wd_save = os.getcwd()\n os.chdir(path)\n cmd = 'jupyter nbconvert --to html \"{}\"'.format(filename)\n os.system(cmd)\n os.chdir(wd_save)\n\n if info:\n print(\"target dir: \", path)\n print(\"cmd: \", cmd)\n print(\"working dir: \", wd_save)", "def cli(ctx, resource, blog, relative_path, verbose):\n ctx.verbose = verbose\n validate_blog_and_settings(ctx, blog, relative_path)\n export_path = resolve_export_path(ctx, relative_path)\n\n resource_map = {\n \"design_assets\": copy_design_assets,\n \"blog_template\": copy_blog_template,\n \"blog_index\": copy_blog_index,\n \"blog_config\": copy_blog_config,\n \"blog_layout\": copy_blog_layout,\n }\n\n transfer = resource_map.get(resource)\n if not transfer:\n ctx.log(\"No such resource. 
See blogger export --help\")\n ctx.log(\"ERROR: INVALID RESOURCE NAME\")\n raise SystemExit()\n\n ctx.vlog(\"Using function\", transfer)\n transfer(ctx, export_path)", "def test_export_html(self):\r\n resp = self.client.get_html(self.url)\r\n self.assertEquals(resp.status_code, 200)\r\n self.assertContains(resp, \"Export My Course Content\")", "def htmlDocDump(self, f):\n ret = libxml2mod.htmlDocDump(f, self._o)\n return ret", "def dump(self, path, mode='standalone'):\n if mode == 'standalone':\n with open(path+\"/export_grid_standalone\"+str(self._id)+\".html\", 'w+') as f:\n f.write(self.export_html(build=True))\n elif mode == 'all':\n widget_export = self.export_html(build=False)\n with open(path+\"/export_scripts.html\", \"w+\") as f:\n f.write(widget_export['script_tags'])\n with open(path+\"/export_html_state.html\", \"w+\") as f:\n f.write(widget_export['html_state'])\n with open(path+\"/export_state_\"+str(self._id)+\".json\", \"w+\") as f:\n f.write(json.dumps(widget_export['manager_state']))\n with open(path+\"/export_grid_\"+str(self._id)+\".html\", \"w+\") as f:\n f.write(widget_export['grid_div'])", "def _on_articles_copy_citation(self, evt=None):\n \n # get selected articles\n articles = self._articles_view.GetSelectedArticles()\n if not articles:\n return\n \n # format citations\n text = \"\"\n for article in articles:\n text += article.format(\"[TI]\\n[AU]\\n[CI]\\n[DOIX]\\n\\n\")\n \n # make text object for data\n obj = wx.TextDataObject()\n obj.SetText(text.strip())\n \n # paste to clipboard\n if wx.TheClipboard.Open():\n wx.TheClipboard.SetData(obj)\n wx.TheClipboard.Close()", "def print_html(html):\n display(HTML(html))", "def assemble(self, page):\n html = self.include(page)\n html = HTMLBeautifier.beautify(html, 4)\n path = osp.join(self.dst, page)\n with open(path, \"wt\") as fp:\n fp.write(html)", "def save_trail_html(title, url):\n r = requests.get(url).text\n raw_insert = {'trail': title,\n \"raw_html\": r}\n trail_page_raw_html.insert_one(raw_insert)\n return None", "def set_code(path, doc):\n if path is None:\n pasteboard.set(doc)\n else:\n with open(path, 'w', encoding='UTF-8') as html_file:\n html_file.write(str(doc))", "def edit_html():\n # save the new html_raw\n entry = request.form.get(\"entry\")\n entry_dict[entry]['html_raw'] = request.form.get(\"raw\")\n entry_dict[entry]['html'] = raw_to_html(entry, entry_dict[entry]['html_raw'])\n # save into pkl file\n with open(entry_dict_path, 'wb') as pkl_file:\n cPickle.dump(entry_dict, pkl_file)\n # update the website\n return jsonify(True)", "def __editCopy(self):\n if QApplication.focusWidget() == e5App().getObject(\"Shell\"):\n e5App().getObject(\"Shell\").copy()\n else:\n self.activeWindow().copy()", "def ReferencePhase2Clipboard (self, event) :\n\t\tif wx.TheClipboard.Open() :\n\t\t\ttxt_data = str( self.GetReferencePhase()[0] )[1:-1]\n\t\t\twx.TheClipboard.SetData(wx.TextDataObject(txt_data)) \n\t\t\twx.TheClipboard.Close()\n\t\telse :\n\t\t\twx.MessageBox(\"Unable to open the clipboard\", \"Error\")", "def ShowHTML(self, url):\n import sys, os, urllib\n if urllib.splittype(url)[0] is None: # just a file spec\n if hasattr(sys, \"frozen\"):\n fname = os.path.join(os.path.dirname(sys.argv[0]),\n \"..\", \"docs\", \"sb_server\", url)\n if not os.path.isfile(fname):\n fname = os.path.join(os.path.dirname(sys.argv[0]), url)\n else:\n fname = os.path.join(os.path.dirname(__file__), \"docs\", url)\n fname = os.path.abspath(fname)\n if not os.path.isfile(fname):\n self.ShowMessage(\"Can't find \"+url)\n return\n 
url = fname\n SetWaitCursor(1)\n os.startfile(url)\n SetWaitCursor(0)", "def save_html(self, file_name=None, raw_html=True):\n if raw_html:\n with open(file_name or self.url_obj.file_path, 'wb') as fh:\n fh.write(self.raw_html)\n else:\n self.lxml.getroottree().write(file_name or self.url_obj.file_path, method=\"html\")", "def htmlDocContentDumpFormatOutput(self, cur, encoding, format):\n if cur is None: cur__o = None\n else: cur__o = cur._o\n libxml2mod.htmlDocContentDumpFormatOutput(self._o, cur__o, encoding, format)", "def create_full_batter_html(url):\n # raw_batter_list = scrape_razzball_batters(url)\n raw_batter_list = fantasy_pro_players(url)\n return create_full_batter(raw_batter_list)", "def copy_text(self):\n self.window.clipboard_clear()\n if self.tab_control.index(\"current\") == 0:\n try:\n self.text = self.textbox.get(\"sel.first\", \"sel.last\")\n except tk.TclError:\n self.text = self.textbox.get(\"1.0\", tk.END)\n self.window.clipboard_append(self.text)\n elif self.tab_control.index(\"current\") == 1:\n self.window.clipboard_append(self.words)", "def paste_text(text, language=\"text\", paste_expire=8640, paste_user=\"paste.py\",\n return_link=True):\n # costruct url\n data = {\"paste_data\": text,\n \"paste_lang\": language,\n \"api_submit\": \"true\",\n \"mode\": \"json\",\n \"paste_user\": paste_user,\n \"paste_expire\": paste_expire\n }\n try:\n with contextlib.closing(urllib2.urlopen(PASTE_BASE_URL, urllib.urlencode(data))) as query:\n id = json.loads(query.read(), object_hook=Struct).result.id\n return PASTE_BASE_URL + id if return_link else id\n except urllib2.HTTPError as e:\n print(\"Error uploading file:\")\n print(e.reason)", "def _copy_to_clipboard_ctrl(self):\n res = \"\"\n clipboard = QtWidgets.QApplication.clipboard()\n proxies = self._get_table()\n for proxy in proxies:\n res += str(proxy) + \"\\n\"\n clipboard.clear(mode=clipboard.Clipboard)\n clipboard.setText(res, mode=clipboard.Clipboard)", "def saveCopyCurrentEditor(self):\n aw = self.activeWindow()\n self.saveCopyEditorEd(aw)", "def write_html(self):\n html_exporter = HTMLExporter(template_file=os.path.join(config[\"templates_dir\"], \"notebook.tpl\"))\n for nb in self.notebooks:\n (body, resources) = html_exporter.from_notebook_node(nb.content)\n body = re.sub('{github_user_name}', config[\"github_user_name\"], body)\n body = re.sub('{github_repo_name}', config[\"github_repo_name\"], body)\n html_path = os.path.join(self.dst_dir, os.path.splitext(nb.filename)[0] + \".html\")\n print(f\"- writing {html_path}\")\n with open(html_path, 'w') as f:\n f.write(body)", "def remove_html( html):\n return html2txt(html)", "def _save_book_detail(self, book_dict, book_nmb):\n content = self._get_book_content(book_dict['book_page_href'])\n with open(f'{PATH_TO_DATA}/{book_nmb}.html', 'w') as file:\n file.write(content)", "def export_notebook():\n #system(\"jupyter nbconvert --to HTML \\\"Look At Enron data set.ipynb\\\"\")\n system(\"jupyter nbconvert --to HTML --output=Look+At+Enron+data+set.html \\\"Look At Enron data set.ipynb\\\"\")\n return", "def get_help_content(burl):\n\n box_content = ''+\\\n '<div class=\"box-top\">' +\\\n ' <div class=\"row\">'+\\\n ' <div class=\"col-lg-12 col-md-12 col-sm-12 col-xs-12\">'+\\\n ' <div class=\"box-part rounded sa-center-content\" style=\"'+\\\n theme_return_this('', 'border-style:solid; border-width:thin; border-color:#343a40;') +'\">'+\\\n get_help_tabs(burl) +\\\n ' </div>'+\\\n ' </div>'+\\\n ' </div>'+\\\n '</div>'\n return box_content", "def 
read_from_clipboard():\n\n return pyperclip.paste()", "def paste(*_, **settings):\n\n hook_exceptions()\n return app", "def _CreateClipboardImage( self ):\n bmap = None\n\n fd, name = tempfile.mkstemp( '.png' )\n try:\n os.close( fd )\n if self.CreatePrintImage( name ):\n bmap = wx.Image( name, wx.BITMAP_TYPE_PNG ).ConvertToBitmap()\n finally:\n os.remove( name )\n\n return bmap", "def __html__(self) -> str:\n components = [\n self.attributee_html,\n self.linked_title if self.title else 'untitled document',\n self.date.string if self.date else '',\n self.descriptive_phrase,\n f'archived in {self.collection}' if self.collection else '',\n ]\n return self.components_to_html(components)" ]
[ "0.5765858", "0.56113607", "0.55505186", "0.55185163", "0.54795206", "0.5450024", "0.5427715", "0.54080874", "0.5321129", "0.5313652", "0.52682567", "0.52502394", "0.52400166", "0.5201354", "0.5193638", "0.51862955", "0.51706195", "0.51384276", "0.5110709", "0.51039314", "0.50777346", "0.50516135", "0.5049289", "0.5045748", "0.50399256", "0.5038537", "0.50320333", "0.50315464", "0.50192195", "0.5017713", "0.5015303", "0.49910522", "0.49899396", "0.4975696", "0.49301708", "0.49295437", "0.49287626", "0.49238238", "0.48951623", "0.48802155", "0.4878112", "0.48772863", "0.48728707", "0.4862285", "0.48567897", "0.4851482", "0.48510426", "0.4845891", "0.4826378", "0.48261222", "0.48069593", "0.47996047", "0.47963637", "0.47725597", "0.47696292", "0.4719436", "0.47084716", "0.4704344", "0.46999788", "0.4699498", "0.4697988", "0.46959165", "0.46952295", "0.46871516", "0.46869463", "0.46794647", "0.46754578", "0.46743247", "0.46629965", "0.46565866", "0.4654243", "0.46472007", "0.46321294", "0.46137992", "0.46097672", "0.46054712", "0.4605299", "0.45858222", "0.45816666", "0.45787", "0.45783478", "0.4575048", "0.45635816", "0.45632213", "0.45549494", "0.4554627", "0.4552195", "0.45467612", "0.45423475", "0.45394534", "0.4531241", "0.45263818", "0.4525857", "0.45255804", "0.45190412", "0.45119485", "0.4510758", "0.45053062", "0.44992852", "0.44991896" ]
0.6119939
0
For testing identity between a diary file and the file obtained after reading and printing it. See testing.
def idempotence(args): title, posts = parse_markdown(os.path.join(args.root, 'index.md')) print_markdown(posts, title, os.path.join(args.dest, 'index.md'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _test_example_eda_adf():\n main([\"pnictogen/repo/split.ADF.in\", \"data/water-dimer.xyz\"])\n assert_equals(\n open(\"data/water-dimer_eda.in\").read(),\n \"\"\"TITLE data/water-dimer.xyz eda\n\nCHARGE 0 0\n\nNumber of atoms\n 6\n\nATOMS Cartesian\nO 0.12908 -0.26336 0.64798 f=f1\nH 0.89795 0.28805 0.85518 f=f1\nH 0.10833 -0.20468 -0.33302 f=f1\nO 0.31020 0.07569 -2.07524 f=f2\nH -0.26065 0.64232 -2.62218 f=f2\nH 0.64083 -0.57862 -2.71449 f=f2\nEnd\n\nFragments\n f1 data/water-dimer_f1.t21\n f2 data/water-dimer_f2.t21\nEnd\n\nBasis\nEnd\n\nGeometry\nEnd\n\n\"\"\",\n )\n assert_equals(\n open(\"data/water-dimer_f1.in\").read(),\n \"\"\"TITLE data/water-dimer.xyz f1\n\nCHARGE 0 0\n\nNumber of atoms\n 3\n\nATOMS Cartesian\nO 0.12908 -0.26336 0.64798\nH 0.89795 0.28805 0.85518\nH 0.10833 -0.20468 -0.33302\nEnd\n\nBasis\nEnd\n\nGeometry\nEnd\n\n\"\"\",\n )\n assert_equals(\n open(\"data/water-dimer_f2.in\").read(),\n \"\"\"TITLE data/water-dimer.xyz f2\n\nCHARGE 0 0\n\nNumber of atoms\n 3\n\nATOMS Cartesian\nO 0.31020 0.07569 -2.07524\nH -0.26065 0.64232 -2.62218\nH 0.64083 -0.57862 -2.71449\nEnd\n\nBasis\nEnd\n\nGeometry\nEnd\n\n\"\"\",\n )", "def identify_file(self, file):", "def test_is_delicious_file(self):\r\n good_file = self._get_del_file()\r\n\r\n self.assertTrue(\r\n DelImporter.can_handle(good_file),\r\n \"DelImporter should handle this file\")\r\n\r\n good_file.close()", "def test_Telegramfd (self):\n\t\t# This file is delivered openned while this test.\n\t\ttestfilepath = os.path.join (self.hotfolder, \"fileinuse.txt\")\n\t\tf = open(testfilepath,\"a\") #opens file with name of \"test.txt\"\n\t\tf.write(\"This file is now opened and i'm writting on it \\n\")\n\n\n\t\tknown_values = set ([\n\t\t\t('TESTS/Test3/Telegram Desktop/Printer output.pdf', '.file'),\n\t\t\t('TESTS/Test3/Telegram Desktop/This is a Zero avi file.avi', '.file'),\n\t\t\t('TESTS/Test3/Telegram Desktop/File not in use.CBR', '.file'),\n\t\t\t('TESTS/Test3/Telegram Desktop/This is a dummy rar file.rar', '.rar'),\n\t\t\t])\n\n\t\tEntries = MD.Telegramfd (self.hotfolder)\n\t\tself.assertEqual (known_values, set(Entries))\n\n\t\tf.close ()", "def test_is_delicious_file(self):\r\n good_file = self._get_del_file()\r\n self.assertTrue(\r\n DelXMLImporter.can_handle(good_file),\r\n \"DelXMLImporter should handle this file\")\r\n good_file.close()", "def test_get_infile(self):\r\n pass # not practically testable, but obvious file I/O\r", "def test_fileinuse (self):\t\t\n\t\t# open a file:\n\t\ttestfilepath = os.path.join (self.testfolder, \"fileinuse.txt\")\n\t\tf = open(testfilepath,\"a\") #opens file with name of \"test.txt\"\n\t\tf.write(\"This file is now opened and i'm writting on it \\n\")\n\t\tself.assertEqual (MD.fileinuse(testfilepath), True) # Checks a file that it is beign written.\n\t\tself.assertEqual (MD.folderinuse(self.testfolder), True) # Checks if any file inside the folder is beign used.\n\t\tf.close()\n\t\tself.assertEqual (MD.fileinuse(testfilepath), False) # Cheks a file that it is closed.\n\t\tself.assertEqual (MD.folderinuse(self.testfolder), False)", "def test_file_shizz(topic_path):\n\tL = [[1,2,3],[4,5,6]]\n\tprint(open(topic_path+'/D/'+str(L[1][0])+'.txt'))", "def test_print_drug_info(self):\n\n pwd = self.get_script_path()\n fout1 = self.test_output_file\n fout2 = pwd+'/../insight_testsuite/tests/my_test/output/test_output_file_2.txt'\n print_drug_info(self.test_sorted_tuple, self.test_dict, self.test_num_unique_name, self.test_total_cost_each_drug, fout2, 2)\n 
self.assertTrue(filecmp.cmp(fout1, fout2))", "def test_DL_import_equals_export(self):\n filepath = '2.txt'\n original_dl = flow_processing_input.DetectorsLocation(2021)\n original_dl.detectors_location_dict = createDLDataset(10).dataset\n original_dl.export_to_file(filepath)\n new_dl = flow_processing_input.DetectorsLocation(2021, filepath)\n os.remove(filepath)\n # Check if new_dl contains the same attributes as the original_dl\n self.assertTrue(new_dl == original_dl)\n # Check if new_dl is not equal to a different DL of same size\n random_dl = flow_processing_input.DetectorsLocation(2021)\n random_dl.detectors_location_dict = createDLDataset(10).dataset\n self.assertFalse(new_dl == random_dl)", "def test_fid(self):\n g = h5g.create(self.fid, '/foobar')\n x = h5i.get_file_id(g)\n self.assertIsInstance(x, h5f.FileID)\n self.assertEqual(x, self.fid)\n self.assertEqual(h5i.get_ref(x), 1)", "def test_DL_import_from_constructor(self):\n filepath = '7.txt'\n original_dl = flow_processing_input.DetectorsLocation(2021)\n original_dl.detectors_location_dict = createDLDataset(10).dataset\n original_dl.export_to_file(filepath)\n new_dl = flow_processing_input.DetectorsLocation(2021, filepath)\n os.remove(filepath)\n # Check if new_dl contains the same attributes as the original_dl\n self.assertTrue(new_dl == original_dl)", "def test_fobj():\n Level3File(get_test_data('nids/Level3_FFC_N0Q_20140407_1805.nids'))", "def test_read_file_from_disk(self):\r\n actual_data = read_file_from_disk(self.test_file1)\r\n self.assertEqual(self.test_file1_data, actual_data)", "def test_is_not_delicious_file(self):\r\n bad_file = StringIO.StringIO()\r\n bad_file.write('failing tests please')\r\n bad_file.seek(0)\r\n\r\n self.assertTrue(\r\n not DelImporter.can_handle(bad_file),\r\n \"DelImporter cannot handle this file\")\r\n\r\n bad_file.close()", "def test_get_db_list_from_file(): # ***Incomplete test\n ##########################\n # Arrange.\n infp = \"infp\"\n\n ##########################\n # Act.\n #x = get_db_list_from_file(infp)\n\n ##########################\n # Assert.\n assert True == True # ***Temporary.", "def _test(self, file_name):\n data = bob.io.base.load(file_name)\n assert (_data == data).all()", "def test_read_from_file():\n reading_file = d.read_code_from_file()\n assert type(reading_file) == list\n assert len(reading_file) == 7\n assert \"\\n\" not in d.read_code_from_file()", "def testOneShow(self):\n\t\t# for line in self.file:\n\t\t# \tprint line\n\t\tline = self.file.readline()\n\t\tinfo = scrapeFilename( line )\n\t\tassert info['show'] == \"Chuck\"", "def test_file_reader(self):\r\n a = list(file_reader(\"student_majors.txt\", 3, sep='|', header=True))\r\n\r\n b = [('123', 'Jin He', 'Computer Science'), ('234', 'Nanda Koka', 'Software Engineering'), \\\r\n ('345', 'Benji Cai', 'Software Engineering')]\r\n self.assertEqual(a, b)\r\n \r\n c = [(\"CWID\", \"Name\", \"Major\"), ('123', 'Jin He', 'Computer Science'), ('234', 'Nanda Koka', 'Software Engineering'), \\\r\n ('345', 'Benji Cai', 'Software Engineering')]\r\n self.assertNotEqual(a,c)", "def test_printdiff(self):\n\n # Testing different string input options\n assert printdiff(self.data('arange.fits'),\n self.data('blank.fits')) is None\n assert printdiff(self.data('arange.fits'),\n self.data('blank.fits'), ext=0) is None\n assert printdiff(self.data('o4sp040b0_raw.fits'),\n self.data('o4sp040b0_raw.fits'),\n extname='sci') is None\n\n # This may seem weird, but check printdiff to see, need to test\n # incorrect second file\n with 
pytest.raises(IOError):\n printdiff('o4sp040b0_raw.fits', 'fakefile.fits', extname='sci')\n\n # Test HDU object inputs\n with fits.open(self.data('stddata.fits'), mode='readonly') as in1:\n with fits.open(self.data('checksum.fits'), mode='readonly') as in2:\n\n assert printdiff(in1[0], in2[0]) is None\n\n with pytest.raises(ValueError):\n printdiff(in1[0], in2[0], ext=0)\n\n assert printdiff(in1, in2) is None\n\n with pytest.raises(NotImplementedError):\n printdiff(in1, in2, 0)", "def test_printdiff(self):\n\n # Testing different string input options\n assert printdiff(self.data(\"arange.fits\"), self.data(\"blank.fits\")) is None\n assert (\n printdiff(self.data(\"arange.fits\"), self.data(\"blank.fits\"), ext=0) is None\n )\n assert (\n printdiff(\n self.data(\"o4sp040b0_raw.fits\"),\n self.data(\"o4sp040b0_raw.fits\"),\n extname=\"sci\",\n )\n is None\n )\n\n # This may seem weird, but check printdiff to see, need to test\n # incorrect second file\n with pytest.raises(OSError):\n printdiff(\"o4sp040b0_raw.fits\", \"fakefile.fits\", extname=\"sci\")\n\n # Test HDU object inputs\n with fits.open(self.data(\"stddata.fits\"), mode=\"readonly\") as in1:\n with fits.open(self.data(\"checksum.fits\"), mode=\"readonly\") as in2:\n assert printdiff(in1[0], in2[0]) is None\n\n with pytest.raises(ValueError):\n printdiff(in1[0], in2[0], ext=0)\n\n assert printdiff(in1, in2) is None\n\n with pytest.raises(NotImplementedError):\n printdiff(in1, in2, 0)", "def test_real_file(self):\n log.info('===== START TEST BYTE LOSS =====')\n\n # Recovered\n file_path = os.path.join(RESOURCE_PATH, '11079364_SNA_SNA.txt')\n\n stream_handle = open(file_path, MODE_ASCII_READ)\n\n self.create_parser(stream_handle, telem_flag=False)\n\n particles = self.parser.get_records(182)\n\n log.debug(\"*** test_real_file Num particles %s\", len(particles))\n\n # check all the values against expected results.\n self.assert_particles(particles, '11079364_SNA_SNA_recov.yml', RESOURCE_PATH)\n self.assertEquals(self.exception_callback_value, [])\n stream_handle.close()\n\n # Telemetered\n file_path = os.path.join(RESOURCE_PATH, '11079419_SNA_SNA.txt')\n\n stream_handle = open(file_path, MODE_ASCII_READ)\n\n self.create_parser(stream_handle)\n\n particles = self.parser.get_records(172)\n\n log.debug(\"*** test_real_file Num particles %s\", len(particles))\n\n # check all the values against expected results.\n self.assert_particles(particles, '11079419_SNA_SNA_telem.yml', RESOURCE_PATH)\n stream_handle.close()\n\n log.info('===== END TEST REAL FILE =====')", "def testReadDataFile(self):\n try:\n blockNameList = []\n myReader = ParseCifSimple(self.__pathPdbxDataFile, False, 0, 255, \"?\", self.__logFileName)\n blockNameList = myReader.GetBlockNames(blockNameList)\n #\n for blockName in blockNameList:\n block = myReader.GetBlock(blockName)\n tableNameList = []\n tableNameList = block.GetTableNames(tableNameList)\n for tableName in tableNameList:\n table = block.GetTable(tableName)\n columnNameList = table.GetColumnNames()\n logger.debug(\"Table %s colunms %r\", tableName, columnNameList)\n numRows = table.GetNumRows()\n rowList = []\n for iRow in range(0, numRows):\n row = table.GetRow(iRow)\n rowList.append(row)\n logger.debug(\"table %s row length %d\", tableName, len(rowList))\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def test_handle_info_reading():\n bids_root = _TempDir()\n\n # read in USA dataset, so it should find 50 Hz\n raw = _read_raw_fif(raw_fname)\n\n # write copy of raw with 
line freq of 60\n # bids basename and fname\n bids_path = BIDSPath(subject='01', session='01',\n task='audiovisual', run='01',\n root=bids_root)\n suffix = \"meg\"\n bids_fname = bids_path.copy().update(suffix=suffix,\n extension='.fif')\n write_raw_bids(raw, bids_path, overwrite=True)\n\n # find sidecar JSON fname\n bids_fname.update(datatype=suffix)\n sidecar_fname = _find_matching_sidecar(bids_fname, suffix=suffix,\n extension='.json')\n\n # assert that we get the same line frequency set\n raw = read_raw_bids(bids_path=bids_path)\n assert raw.info['line_freq'] == 60\n\n # 2. if line frequency is not set in raw file, then ValueError\n raw.info['line_freq'] = None\n with pytest.raises(ValueError, match=\"PowerLineFrequency .* required\"):\n write_raw_bids(raw, bids_path, overwrite=True)\n\n # make a copy of the sidecar in \"derivatives/\"\n # to check that we make sure we always get the right sidecar\n # in addition, it should not break the sidecar reading\n # in `read_raw_bids`\n deriv_dir = op.join(bids_root, \"derivatives\")\n sidecar_copy = op.join(deriv_dir, op.basename(sidecar_fname))\n os.mkdir(deriv_dir)\n with open(sidecar_fname, \"r\", encoding='utf-8') as fin:\n sidecar_json = json.load(fin)\n sidecar_json[\"PowerLineFrequency\"] = 45\n _write_json(sidecar_copy, sidecar_json)\n raw = read_raw_bids(bids_path=bids_path)\n assert raw.info['line_freq'] == 60\n\n # 3. assert that we get an error when sidecar json doesn't match\n _update_sidecar(sidecar_fname, \"PowerLineFrequency\", 55)\n with pytest.raises(ValueError, match=\"Line frequency in sidecar json\"):\n raw = read_raw_bids(bids_path=bids_path)\n assert raw.info['line_freq'] == 55", "def test_DL_import_wrong_file_unserialized(self):\n filepath = '6.txt'\n wrong_file = open(filepath, 'x')\n wrong_file.write(\"This is not a serialized detector flow data\")\n wrong_file.close()\n # Check if exception was raised for wrong serialization\n with self.assertRaises(Exception):\n flow_processing_input.DetectorsLocation(9999, filepath)\n os.remove(filepath)", "def test_file_access():\n file = gff.GFFFile()\n entry_scaffold = (\"ab\", \"cd\", 1, 2, None, None, None, {\"Id\":\"foo\"})\n entry = (\"a\",) + entry_scaffold\n file.append(*entry)\n assert file[0] == entry\n file.append(*((\"b\",) + entry_scaffold))\n file.insert(1, *((\"c\",) + entry_scaffold))\n file[1] = (\"d\",) + entry_scaffold\n file.insert(3, *((\"e\",) + entry_scaffold))\n del file[2]\n assert [seqid for seqid, _, _, _, _, _, _, _, _ in file] \\\n == [\"a\", \"d\", \"e\", ]", "def test_is_not_delicious_file(self):\r\n bad_file = StringIO.StringIO()\r\n bad_file.write('failing tests please')\r\n bad_file.seek(0)\r\n\r\n self.assertTrue(\r\n not DelXMLImporter.can_handle(bad_file),\r\n \"DelXMLImporter cannot handle this file\")\r\n\r\n bad_file.close()", "def test_the_main_with_simple_yet_proper_file(self):\r\n assert the_main_function('test_proper_file.csv') == (['\\ufeffid', 'link'], [['1', 'abc.com'], ['2', 'gef.com']])", "def idfreader1(fname, iddfile, theidf, conv=True, commdct=None, block=None):\n versiontuple = iddversiontuple(iddfile)\n # import pdb; pdb.set_trace()\n block, data, commdct, idd_index = readidf.readdatacommdct1(\n fname, iddfile=iddfile, commdct=commdct, block=block\n )\n if conv:\n convertallfields(data, commdct, block)\n # fill gaps in idd\n ddtt, dtls = data.dt, data.dtls\n if versiontuple < (8,):\n skiplist = [\"TABLE:MULTIVARIABLELOOKUP\"]\n else:\n skiplist = None\n nofirstfields = iddgaps.missingkeys_standard(commdct, dtls, 
skiplist=skiplist)\n iddgaps.missingkeys_nonstandard(block, commdct, dtls, nofirstfields)\n # bunchdt = makebunches(data, commdct)\n bunchdt = makebunches_alter(data, commdct, theidf, block)\n return bunchdt, block, data, commdct, idd_index, versiontuple", "def test():\n from pylada.dftcrystal.functional import Functional\n from pylada.dftcrystal import Crystal\n from pylada.dftcrystal.parse import parse\n parsed = parse(string)\n structure = Crystal()\n structure.read_input(parsed['rutile']['CRYSTAL'])\n a = Functional()\n a.read_input(parsed)\n assert a.scfdir \n assert a.maxcycle == 300\n assert a.exchsize == 6937578\n # need structure otherwise parse can't find beginning of input.\n otherstring = a.print_input(structure=structure)\n otherparsed = parse(otherstring)\n b = Functional()\n b.read_input(otherparsed)\n assert otherstring == b.print_input(structure=structure)", "def test_reader(self):\n default_list_file = GAMEBASE + \"/Lists/char-LIST.tex\"\n output = name_pdfs.read_names_file(default_list_file)\n self.assertIsInstance(output, dict)\n self.assertGreater(len(output), 0)\n # Check that the function returns a dict ✓\n # Of greater than length 0 ✓\n fname = \"\"\n for example in self.bad_filenames:\n with self.subTest(fname=example):\n with self.assertRaises(OSError):\n name_pdfs.read_names_file(fname)", "def test_file_reader(self) -> None:\n result = [['123', 'Jin He', 'Computer Science'],\n ['234', 'Nanda Koka', 'Software Engineering'],\n ['345', 'Benji Cai', 'Software Engineering']]\n # file have header\n self.assertTrue(\n list(file_reader('C:/Users/Nidhi/Desktop/SEM3/810/HW08/student_majors.txt', 3, '|', True)) == result)\n # file without header\n self.assertFalse(\n list(file_reader('C:/Users/Nidhi/Desktop/SEM3/810/HW08/student_majors.txt', 3, '|')) == result)\n # More than 3 datafield\n with self.assertRaises(ValueError):\n list(file_reader(\n 'C:/Users/Nidhi/Desktop/SEM3/810/HW08/student_majors.txt', 4, '|', True))\n # file not found\n with self.assertRaises(FileNotFoundError):\n list(file_reader('abc.txt', 3, '|', True))", "def test_open_ped_duplicate_person(self):\n \n self.temp.write('A B 0 0 1 1\\n')\n self.temp.write('A B 0 0 1 1\\n')\n self.temp.flush()\n \n with self.assertRaises(ValueError):\n open_ped(self.temp.name)", "def _read_file(self):\n\n with open(self.file_name, 'rb') as f:\n new_test = struct.unpack('<l', f.read(8)[4:])[0]\n f.close()\n\n with open(self.file_name, 'rb') as f:\n old_test = struct.unpack('<h', f.read(6)[4:])[0]\n f.close()\n\n with open(self.file_name, 'rb') as f:\n other_test = struct.unpack('<l', f.read(20)[16:])[0]\n f.close()\n\n open_file = open(self.file_name, 'rb')\n\n if (other_test==202):\n raw = open_file.read(1236)[11:]\n self.model = '202'\n elif ((not new_test==102) and old_test==102):\n raw = open_file.read(1133)\n self.model = '102old'\n elif (new_test==102 and old_test==102):\n raw = open_file.read(1224)\n self.model = '102new'\n\n self.header = DpHeader(raw, self.model)\n\n self.data = DpData(open_file, \n self.model, \n self.header.interferogram_size, \n self.header.number_of_coadds, \n 2048*self.header.zero_fill,\n self.header.laser_wavelength_microns, \n self.header.dispersion_constant_xm,\n self.header.dispersion_constant_xb)\n\n open_file.close()", "def test_identify_contents_1(self):\n Path(self.base_dir, \"new_dir\").mkdir()\n Path(self.base_dir, \"file1.txt\").touch()\n Path(self.base_dir, \".DS_Store\").touch()\n list_of_items = basic.identify_contents(self.base_dir, kind=\"file\")\n exp_num_items = 2\n 
self.assertEqual(len(list_of_items), exp_num_items)", "def test_basic():\n f = Level3File(get_test_data('nids/Level3_FFC_N0Q_20140407_1805.nids', as_file_obj=False))\n assert f.metadata['prod_time'].replace(second=0) == datetime(2014, 4, 7, 18, 5)\n assert f.metadata['vol_time'].replace(second=0) == datetime(2014, 4, 7, 18, 5)\n assert f.metadata['msg_time'].replace(second=0) == datetime(2014, 4, 7, 18, 6)\n assert f.filename == get_test_data('nids/Level3_FFC_N0Q_20140407_1805.nids',\n as_file_obj=False)\n\n # At this point, really just want to make sure that __str__ is able to run and produce\n # something not empty, the format is still up for grabs.\n assert str(f)", "def test_file(self):\n a = False\n if \"show()\" in open('attempt.py').read():\n a = True\n self.assertEquals(a,True)", "def test_nonfile(self):\n self.assertEqual(None,readfiles.read_file(\"tests.txt))", "def test_get_query_list_from_file(): # ***Incomplete test\n ##########################\n # Arrange.\n infp = \"infp\"\n\n ##########################\n # Act.\n #x = get_query_list_from_file(infp)\n\n ##########################\n # Assert.\n assert True == True # ***Temporary.", "def test_read_identity(self):\n pass", "def test_factory_gives_delicious(self):\r\n loc = os.path.dirname(__file__)\r\n del_file = os.path.join(loc, 'delicious.html')\r\n\r\n with open(del_file) as del_io:\r\n imp = Importer(del_io, username=u\"admin\")\r\n\r\n self.assertTrue(\r\n isinstance(imp, DelImporter),\r\n \"Instance should be a delimporter instance\")", "def test_open_ped(self):\n \n self.temp.write('A B 0 0 1 1\\n')\n self.temp.flush()\n families = open_ped(self.temp.name)\n \n fam = Family('A')\n fam.add_person(Person('A', 'B', '0', '0', '1', '1'))\n \n self.assertEqual(families[0].nodes, fam.nodes)", "def test_read_file():\n filename = 'sample'\n assert read_file(filename) == 'hello!\\n'", "def diet_retrieve(file_name):\n with open(file_name, \"r\") as f:\n content = f.read()\n return print(content)", "def test_call_infernal_test1_file_output(self):\r\n # do not collect results; check output files instead\r\n actual = self.infernal_test1_aligner(\r\n self.infernal_test1_input_fp, result_path=self.result_fp,\r\n log_path=self.log_fp)\r\n\r\n self.assertTrue(actual is None,\r\n \"Result should be None when result path provided.\")\r\n\r\n expected_aln = self.infernal_test1_expected_aln\r\n with open(self.result_fp) as result_f:\r\n actual_aln = Alignment.from_fasta_records(parse_fasta(\r\n result_f), DNA)\r\n self.assertEqual(actual_aln, expected_aln)", "def load_patient_data():\n data_file = open(\"test_data.txt\", \"r\")\n still_finding_patients = True\n my_patients = []\n while still_finding_patients is True:\n name_line = next(data_file)\n if name_line != \"END\":\n name_line = name_line.split()\n fname = name_line[0]\n lname = name_line[1]\n age = next(data_file).strip()\n gender = next(data_file).strip().casefold()\n tsh_data = next(data_file)\n tsh_data = tsh_data.strip().split(\",\")\n tsh_data.remove(\"TSH\")\n new_patient = create_patient(fname, lname, age, gender, tsh_data)\n my_patients.append(new_patient)\n else:\n still_finding_patients = False\n data_file.close()\n return my_patients", "def test_artemis_reader():\n _test_raw_reader(\n read_raw_artemis123,\n input_fname=short_hpi_1kz_fname,\n pos_fname=dig_fname,\n verbose=\"error\",\n )", "def test_readfile(self):\n fname = os.path.join(self.datadir, 'monol_testA_E3-50_rebin4_gti') + \\\n HEN_FILE_EXTENSION\n command = \"{0}\".format(fname)\n\n 
hen.io.main(command.split())", "def test_reader(file_dir,\n word2id_dict,\n label2id_dict,\n word_replace_dict,\n filename_feature=\"\"):\n word_dict_len = max(map(int, word2id_dict.values())) + 1\n label_dict_len = max(map(int, label2id_dict.values())) + 1\n\n def reader():\n \"\"\"\n the data generator\n \"\"\"\n index = 0\n for root, dirs, files in os.walk(file_dir):\n for filename in files:\n if not filename.startswith(filename_feature):\n continue\n for line in io.open(\n os.path.join(root, filename), 'r', encoding='utf8'):\n index += 1\n bad_line = False\n line = line.strip(\"\\n\")\n if len(line) == 0:\n continue\n seg_tag = line.rfind(\"\\t\")\n if seg_tag == -1:\n seg_tag = len(line)\n word_part = line[0:seg_tag]\n label_part = line[seg_tag + 1:]\n word_idx = []\n words = word_part\n for word in words:\n if ord(word) < 0x20:\n word = ' '\n if word in word_replace_dict:\n word = word_replace_dict[word]\n if word in word2id_dict:\n word_idx.append(int(word2id_dict[word]))\n else:\n word_idx.append(int(word2id_dict[\"OOV\"]))\n yield word_idx, words\n\n return reader", "def test_checkForOldParents(self):\n f = open(\"Output.txt\", \"a+\")\n fam: Dict = {'F23':\n {'fam': 'F23', 'MARR': '14 FEB 1980', 'HUSB': 'I01', 'WIFE': 'I07',\n 'CHIL': ['I19', 'I26', 'I30']},\n 'F16': {'fam': 'F16', 'MARR': '12 DEC 2007'}}\n fam2: Dict = {'F23': {'fam': 'F23', 'MARR': '14 FEB 1980', 'HUSB': 'I01', 'WIFE': 'I07', 'CHIL': ['I19']}}\n\n ind1: Dict = {'I01': {'id': 'I01', 'name': 'Joe /Smith/', 'BIRT': '15 JUL 1900', 'sex': 'M', 'family': 'F23',\n 'DEAT': '31 DEC 2013'},\n 'I07': {'id': 'I07', 'name': 'Jennifer /Smith/', 'BIRT': '23 SEP 1902', 'sex': 'F',\n 'family': 'F23',\n 'DEAT': '31 DEC 2013'},\n 'I19': {'id': 'I19', 'name': 'Dick /Smith/', 'BIRT': '13 FEB 1999', 'sex': 'M', 'family': 'F23',\n 'DEAT': '31 DEC 2013'}}\n\n ind2: Dict = {'I01': {'id': 'I01', 'name': 'Joe /Smith/', 'BIRT': '15 JUL 1960', 'sex': 'M', 'family': 'F23',\n 'DEAT': '31 DEC 2013'},\n 'I07': {'id': 'I07', 'name': 'Jennifer /Smith/', 'BIRT': '23 SEP 1960', 'sex': 'F',\n 'family': 'F23'},\n 'I19': {'id': 'I19', 'name': 'Dick /Smith/', 'BIRT': '13 FEB 1981', 'sex': 'M', 'family': 'F23'},\n 'I26': {'id': 'I26', 'name': 'Jane /Smith/', 'BIRT': '13 FEB 1981', 'sex': 'F', 'family': 'F23'},\n 'I30': {'id': 'I30', 'name': 'Mary /Test/', 'BIRT': '13 FEB 1981', 'sex': 'F', 'family': 'F23'},\n 'I32': {'id': 'I32', 'name': 'Nick /Tary/', 'BIRT': '13 FEB 1981', 'sex': 'M', 'family': 'F23'},\n 'I44': {'id': 'I44', 'name': 'Cersi /Lanister/', 'BIRT': '13 FEB 1981', 'sex': 'F',\n 'family': 'F23'}}\n\n self.assertTrue(us.checkForOldParents(fam, ind2, f))\n self.assertFalse(us.checkForOldParents(fam2, ind1, f))", "def test_read_delete(self):\n\n expected = \"Hello, World! 
This has been written by Fun Ilrys.\"\n File(\"hi\").write(expected)\n actual = File(\"hi\").read()\n\n self.assertEqual(expected, actual)\n\n expected = False\n File(\"hi\").delete()\n actual = PyFunceble.path.isfile(\"hi\")\n\n self.assertEqual(expected, actual)", "def test_identify_contents_2(self):\n Path(self.base_dir, \"new_dir\").mkdir()\n Path(self.base_dir, \"file1.txt\").touch()\n Path(self.base_dir, \".DS_Store\").touch()\n ignore_set = set([\".DS_Store\"])\n list_of_items = basic.identify_contents(self.base_dir, kind=\"file\",\n ignore_set=ignore_set)\n exp_num_items = 1\n self.assertEqual(len(list_of_items), exp_num_items)", "def idfreader(fname, iddfile, conv=True):\n data, commdct, idd_index = readidf.readdatacommdct(fname, iddfile=iddfile)\n if conv:\n convertallfields(data, commdct)\n # fill gaps in idd\n ddtt, dtls = data.dt, data.dtls\n # skiplist = [\"TABLE:MULTIVARIABLELOOKUP\"]\n nofirstfields = iddgaps.missingkeys_standard(\n commdct, dtls, skiplist=[\"TABLE:MULTIVARIABLELOOKUP\"]\n )\n iddgaps.missingkeys_nonstandard(None, commdct, dtls, nofirstfields)\n bunchdt = makebunches(data, commdct)\n return bunchdt, data, commdct, idd_index", "def test_call_requires_read_1_file(self):\r\n p = RtaxTaxonAssigner({\r\n 'reference_sequences_fp': self.reference_seqs_fp,\r\n 'id_to_taxonomy_fp': self.id_to_taxonomy_fp})\r\n\r\n # no read_1_seqs_fp passed results in AssertionError\r\n self.assertRaises(AssertionError, p, self.input_seqs_fp)", "def test_GFD_import_equals_export(self):\n filepath = '2.txt'\n original_gfd = flow_processing_input.GroundFlowData()\n original_gfd.detector_flow_data = createGFDDataset(10).dataset\n original_gfd.export_to_file(filepath)\n new_gfd = flow_processing_input.GroundFlowData(filepath)\n os.remove(filepath)\n # Check if new_gfd contains the same attributes as the original_gfd\n self.assertTrue(new_gfd == original_gfd)\n # Check if new_gfd is not equal to a different gfd of same size\n random_gfd = flow_processing_input.GroundFlowData()\n random_gfd.detector_flow_data = createGFDDataset(10).dataset\n self.assertFalse(new_gfd == random_gfd)", "def test_doubled_file():\n with contextlib.closing(get_test_data('Level2_KFTG_20150430_1419.ar2v')) as infile:\n data = infile.read()\n fobj = BytesIO(data + data)\n f = Level2File(fobj)\n assert len(f.sweeps) == 12", "def test_DL_import_wrong_file_serialized(self):\n filepath = '5.txt'\n with open(filepath, 'wb') as file:\n pickle.dump([\"This is a wrong dataset\"], file)\n # Check if exception was raised for wrong data type\n with self.assertRaises(Exception):\n flow_processing_input.DetectorsLocation(9999, filepath)\n os.remove(filepath)", "def main():\n print \"=\" * 78\n print \"%s %s\" % (__prog_name__, __version__)\n debug, input_file_names = check_cli()\n if not input_file_names:\n _error(\"No input file name found!\\n\\n%s\" % __help__)\n for input_file_name in input_file_names:\n print \"* Reading\", input_file_name\n file_base_name = os.path.splitext(os.path.basename(input_file_name))[0]\n file_dir_name = os.path.dirname(input_file_name)\n sections = {}\n tex_map = {}\n with open(input_file_name, 'rU') as in_fd:\n sections = get_sections(in_fd.read())\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"sec\",\n {\"sections\": sections})\n\n if not sections:\n _error(\"Nothing could be read from '%s'.\\nIs this an Oolite .dat file?\" \\\n % input_file_name)\n\n # Magically call the 'check' functions\n for name in sections.keys():\n f_name = \"check_%s\" % name.lower()\n if f_name in 
globals().keys():\n if not globals()[f_name](sections):\n _error(\"Number of entries in '%s' section is different as declared!\" % name)\n\n def get_data(name, sections=sections):\n \"\"\"Returns the 'data' object from the 'name' one found in the\n 'sections' one.\n :sections: dictionary: Object returned by 'get_sections'.\n :name: string: The name of the section to get the 'data'.\n Returns a list of 'lines'.\n \"\"\"\n return sections.get(name, {}).get(\"data\", [])\n\n oti_file_name = build_file_path(file_dir_name, file_base_name, \"oti\")\n tex_map = parse_names(get_data(\"NAMES\"), oti_file_name)\n\n tex_refs, tex_lines_out = parse_textures(get_data(\"TEXTURES\"))\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"tex\",\n {\"tex_refs\": tex_refs,\n \"tex_lines_out\": tex_lines_out})\n\n # Update the tex_map object if textures indexes and names are both\n # used in 'TEXTURES'.\n if sorted(tex_map.keys()) != sorted(tex_refs.get(\"named\").keys()):\n tex_map = update_tex_map(tex_map,\n set(tex_refs[\"named\"].keys()).difference(tex_map.keys()))\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"txm\",\n {\"tex_map\": tex_map})\n\n n_verts, vertex_lines_out = parse_vertex(get_data(\"VERTEX\"))\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"ver\",\n {\"n_verts\": n_verts,\n \"vertex_lines_out\": vertex_lines_out})\n\n n_normals, normals_lines_out = parse_normals(get_data(\"NORMALS\"))\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"nor\",\n {\"n_normals\": n_normals,\n \"normals_lines_out\": normals_lines_out})\n\n n_faces, faces_groups = parse_faces(get_data(\"FACES\"), tex_refs,\n normals_lines_out)\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"fac\",\n {\"n_faces\": n_faces,\n \"faces_groups\": faces_groups})\n\n output_file_name = build_file_path(file_dir_name,\n file_base_name, 'obj')\n material_file_name = build_file_path(file_dir_name,\n file_base_name, 'mtl')\n mtl_lib_file = os.path.basename(material_file_name)\n\n write_obj(output_file_name, file_base_name, mtl_lib_file,\n tex_lines_out, tex_map, n_verts, vertex_lines_out,\n n_normals, normals_lines_out, n_faces, faces_groups)\n\n write_mtl(material_file_name, tex_map)\n\n _exit(\"* Done\")", "def test_file_analyzer(self):\r\n file_analyzer = FileAnalyzer(\"C:\\\\Users\\\\Himan\\\\Desktop\\\\Semester 2\\\\SSW 810\\\\HW\\\\Assignment 8\")\r\n self.assertEqual(file_analyzer.files_summary, {'HW08_Himanshu.py': {'class': 1, 'function': 5, 'line': 100, 'char': 4472}, \\\r\n 'HW08_Test_Himanshu.py': {'class': 1, 'function': 3, 'line': 38, 'char': 1861}})\r\n self.assertNotEqual(file_analyzer.files_summary, {'HW08_Himanshu.py': {'class': 0, 'function': 5, 'line': 46, 'char': 1931}})\r\n\r\n self.assertNotEqual(file_analyzer.files_summary, {'HW08_Himanshu.py': {'class': 1, 'function': 5, 'line': 100}}) # testing less fields\r\n\r\n with self.assertRaises(FileNotFoundError): # raises exception error\r\n FileAnalyzer(\"C:\\\\Users\\\\Himan\\\\Desktop\\\\Semester 2\\\\SSW 810\\\\HW\\\\Assignment 10\").files_summary", "def test_hdf5_reader_dos(data_regression):\n from masci_tools.io.parsers.hdf5 import HDF5Reader\n from masci_tools.io.parsers.hdf5.recipes import FleurDOS\n\n TEST_BANDDOS_FILE = os.path.join(HDFTEST_DIR, 'banddos_dos.hdf')\n\n with open(TEST_BANDDOS_FILE, 'rb') as file:\n with HDF5Reader(file) as reader:\n data, attrs = reader.read(recipe=FleurDOS)\n\n data_regression.check({'datasets': convert_to_pystd(data), 'attributes': 
convert_to_pystd(attrs)})", "def test_main(data, tmp_path):\n\n main(data, tmp_path)\n\n FILES = (\n \"gd32f888x(0-1)xx-pinctrl.h\",\n \"gd32f888x(2-3)xx-pinctrl.h\",\n \"gd32f888y(0-1)xx-pinctrl.h\",\n \"gd32f999x(0-1)xx-pinctrl.h\",\n \"gd32f999x(2-3)xx-pinctrl.h\",\n \"gd32f999y(0-1)xx-pinctrl.h\",\n )\n\n for file in FILES:\n ref_file = data / file\n gen_file = tmp_path / file\n\n assert gen_file.exists()\n\n with open(ref_file) as ref, open(gen_file) as gen:\n assert ref.read() == gen.read()", "def test_GFD_import_from_constructor(self):\n filepath = '7.txt'\n original_gfd = flow_processing_input.GroundFlowData()\n original_gfd.detector_flow_data = createGFDDataset(10).dataset\n original_gfd.export_to_file(filepath)\n new_gfd = flow_processing_input.GroundFlowData(filepath)\n os.remove(filepath)\n # Check if new_gfd contains the same attributes as the original_gfd\n self.assertTrue(new_gfd == original_gfd)", "def test_identify_contents_7(self):\n Path(self.base_dir, \"new_dir1\").mkdir()\n Path(self.base_dir, \"new_dir2\").mkdir()\n Path(self.base_dir, \"file1.txt\").touch()\n Path(self.base_dir, \".DS_Store\").touch()\n list_of_items = basic.identify_contents(self.base_dir, kind=\"invalid\")\n self.assertIsNone(list_of_items)", "def test_whole_file(self):\n import soundfile\n import numpy as np\n\n data_file_path = \"sample-data/flacformats.d2\"\n expected_format = \"FLAC\"\n expected_subtype = \"PCM_24\"\n\n # Read the file using standard file I/O\n sf1 = soundfile.SoundFile(data_file_path)\n self.assertEqual(sf1.format, expected_format)\n self.assertEqual(sf1.subtype, expected_subtype)\n data1 = sf1.read()\n\n # Read the file using HTTP\n with open(data_file_path, \"rb\") as f:\n file_content = {\"/foo.dat\": f.read()}\n with DummyHTTPServer(file_content) as server:\n url = server.url(\"/foo.dat\")\n file2 = wfdb.io._url.openurl(url, \"rb\")\n sf2 = soundfile.SoundFile(file2)\n self.assertEqual(sf2.format, expected_format)\n self.assertEqual(sf2.subtype, expected_subtype)\n data2 = sf2.read()\n\n # Check that results are equal\n np.testing.assert_array_equal(data1, data2)", "def testGetFile(self):\n try:\n remoteLocator = self.__pathPdbxDictionaryFile\n fn = self.__fileU.getFileName(remoteLocator)\n # _, fn = os.path.split(remoteLocator)\n lPath = os.path.join(self.__workPath, fn)\n ok = self.__fileU.get(remoteLocator, lPath)\n self.assertTrue(ok)\n ok = self.__fileU.exists(lPath)\n self.assertTrue(ok)\n ok = self.__fileU.isLocal(lPath)\n self.assertTrue(ok)\n tPath = self.__fileU.getFilePath(lPath)\n self.assertEqual(lPath, tPath)\n ok = self.__fileU.remove(lPath)\n self.assertTrue(ok)\n dPath = os.path.join(self.__workPath, \"tdir\")\n ok = self.__fileU.mkdir(dPath)\n self.assertTrue(ok)\n ok = self.__fileU.remove(dPath)\n self.assertTrue(ok)\n ok = self.__fileU.remove(\";lakdjf\")\n self.assertTrue(ok)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def sniff( self, filename ):\n handle = open(filename)\n line = handle.readline()\n handle.close()\n first = line.split()\n\n # 0 -> ID animal\n #read fixed effect\n i=1\n while (first[i].isalnum() and i<len(first)):\n i=i+1\n\n if ( i >= len(first) ):\n return False\n\n #read cov\n while ((i+1)<len(first) and not first[i+1].isalnum() ):\n i=i+1\n\n if ( i+1 >= len(first) ):\n return False\n\n #read trait\n while ((i+1)<len(first) and (i+2)<len(first) and not first[i].isalnum() and first[i+1].isalnum() and first[i+2].isalnum()):\n i=i+3\n\n if ( i != len(first) ):\n return False\n\n return 
True", "def test_read_file_to_list():\n new_file = \"Hello,World\"\n data = race.read_file_to_list()\n assert type(data) == list\n for x in data:\n assert type(x) == dict\n assert len(data) == 33\n with pytest.raises(FileNotFoundError):\n Race(new_file).read_file_to_list()", "def test_open_ped_mismatch(self):\n \n self.temp.write('A B 0 0 1 1\\n')\n self.temp.flush()\n families = open_ped(self.temp.name)\n \n fam = Family('A')\n fam.add_person(Person('A', 'B', '0', '0', '1', '1'))\n fam.add_person(Person('A', 'C', '0', '0', '1', '1'))\n \n self.assertNotEqual(families[0].nodes, fam.nodes)", "def test_read():\n f = open('test', mode='r')\n line = f.read()\n f.close()", "def test_two_files():\n\n out_file = ''.join(\n random.choices(string.ascii_uppercase + string.digits, k=5))\n try:\n if os.path.isfile(out_file):\n os.remove(out_file)\n\n rv, out = getstatusoutput(f'{prg} -f {tair} {amigo} -o {out_file}')\n assert rv == 0\n assert re.search('1: tair_heat.txt', out)\n assert re.search('2: amigo_heat.txt', out)\n assert re.search(\n f'Wrote 20 gene IDs from 2 files to file \"{out_file}\"', out)\n assert os.path.isfile(out_file)\n exp_two = '\\n'.join(\n sorted(\"\"\"\n AT5G12020 AT3G06400 AT2G33590 AT1G54050 AT5G67030 AT4G14690 AT1G16030 AT5G03720 AT3G10800 \n AT5G12140 AT1G64280 AT3G24500 AT3G09440 AT3G04120 AT4G19630 AT1G16540 AT2G22360 AT1G13930 \n AT5G41340 AT3G24520\n \"\"\".split()))\n assert open(out_file).read().strip() == exp_two.strip()\n\n finally:\n if os.path.isfile(out_file):\n os.remove(out_file)", "def test_GFD_import_wrong_file_unserialized(self):\n filepath = '6.txt'\n wrong_file = open(filepath, 'x')\n wrong_file.write(\"This is not a serialized detector flow data\")\n wrong_file.close()\n # Check if exception was raised for wrong serialization\n with self.assertRaises(Exception):\n flow_processing_input.GroundFlowData(filepath)\n os.remove(filepath)", "def test_get_file_accessors(self):\n pass", "def test_get_file_object(self):\n pass", "def test_fileobj_not_closed(self):\n\n f = open(self.data('test0.fits'), 'rb')\n data = fits.getdata(f)\n assert not f.closed\n\n f.seek(0)\n header = fits.getheader(f)\n assert not f.closed", "def read(self):\n if self.getiddname() is None:\n errortxt = (\n \"IDD file needed to read the idf file. 
\"\n \"Set it using IDF.setiddname(iddfile)\"\n )\n raise IDDNotSetError(errortxt)\n readout = idfreader1(\n self.idfname, self.iddname, self, commdct=self.idd_info, block=self.block\n )\n (self.idfobjects, block, self.model, idd_info, idd_index, idd_version) = readout\n self.setidd(idd_info, idd_index, block, idd_version)", "def test_nids_supplemental(fname, truth):\n f = Level3File(get_test_data(fname))\n assert f.metadata['delta_time'] == truth[0]\n assert f.metadata['supplemental_scan'] == truth[1]", "def test_sanity_ati1():\n\n with Image.open(TEST_FILE_ATI1) as im:\n im.load()\n\n assert im.format == \"DDS\"\n assert im.mode == \"L\"\n assert im.size == (64, 64)\n\n assert_image_equal_tofile(im, TEST_FILE_ATI1.replace(\".dds\", \".png\"))", "def F(f):\n return datafile(f, __name__)", "def test_file_has_details(self):\n file_content = bytes(\n \"\"\"\n 1001 | Eggs | 12\\n\n 1002 | Bread | 1\\n\n 1003 | Butter | 1\\n\n 1004 | Milk | 1\\n\n 1005 | Corn | 2\n \"\"\", encoding='UTF-8'\n )\n\n test_file = SimpleUploadedFile(\n \"sample.txt\", file_content, content_type=\"text/txt\")\n \n new_file = File.objects.create(\n file=test_file,\n rows=5,\n items='Eggs, Bread, Butter, Milk, Corn'\n )\n\n file_details = File.objects.get(id=new_file.id)\n self.assertEqual(file_details.file.name, new_file.file.name)\n self.assertEqual(file_details.rows, new_file.rows)\n self.assertEqual(file_details.items, new_file.items)", "def test_open_ped_header(self):\n \n self.temp.write('family_id person_id dad mom sex phenotype\\n')\n self.temp.write('A B 0 0 1 1\\n')\n self.temp.flush()\n families = open_ped(self.temp.name)\n \n fam = Family('A')\n fam.add_person(Person('A', 'B', '0', '0', '1', '1'))\n \n self.assertEqual(families[0].nodes, fam.nodes)", "def OpenDosi(filename=None):\n\tglobal dosi, spacing_dosi, dim_x_dosi, dim_y_dosi, dim_z_dosi, dosi_open, isodose_show, origin_dosi, filename_dosi\n\tdosi_swapY,dosi_swapZ = False, False\n\n\ttypes = [('All files', '*.dcm *.mhd'), ('DCM files', '*.dcm'), ('MHD files', '*.mhd')]\n\n\tif(filename==None):\tfile_path = tkFileDialog.askopenfilename(initialdir = dir_ini, filetypes = types)\n\telse:\tfile_path = filename\n\n\tfilename_dosi = file_path\n\n\tprint('Opening RD file ...')\n\n\t### .dcm file ###\n\tif(file_path.endswith('.dcm')):\n\t\tds = pydicom.read_file(file_path)\n\t\tds.file_meta.TransferSyntaxUID = pydicom.uid.ImplicitVRLittleEndian \n\t\tscaling_dosi = float(ds.DoseGridScaling)\n\t\tdosi = scaling_dosi*ds.pixel_array\n\t\tsp = ds.PixelSpacing\n\t\tspacing_dosi = [ float(ds.GridFrameOffsetVector[1] - ds.GridFrameOffsetVector[0]), float(sp[1]),float(sp[0])]\n\t\torigin_dosi = ds.ImagePositionPatient\n\t\torigin_dosi = [float(origin_dosi[2]),float(origin_dosi[1]),float(origin_dosi[0])]\n\t\tdosi_swapZ =(ds.ImageOrientationPatient[0:3] == [1, 0, 0])\n dosi_swapY =(ds.ImageOrientationPatient[3:6] == [0, 1, 0])\n\n\t\t#if ds.SeriesDescription=='PatientLETScorer [MeV/mm/(g/cm3)]':\tSetIntensityRange(dosi,0,15)\n\n\t### .mhd file ###\n\tif(file_path.endswith('.mhd')):\t\n \t\titkimage = sitk.ReadImage(file_path) \t\t\t\t# Reads the image using SimpleITK\n \t\tdosi = sitk.GetArrayFromImage(itkimage)\n\t\tspacing_dosi = np.array(list(reversed(itkimage.GetSpacing()))) \t# Read the spacing along each dimension\n\t\torigin_dosi = np.array(list(reversed((itkimage.GetOrigin()))))\t\t# Read the origin\n\t\ttext_file = open(file_path, \"r\")\n\t\ttmp = text_file.readlines()\n\t\tdosi_swap = (tmp[8][-4:-1] == 'RAI')\n\n\tif(len(np.shape(volume))==3):\tdim_x_dosi, 
dim_y_dosi, dim_z_dosi = np.shape(dosi)[0], np.shape(dosi)[1], np.shape(dosi)[2]\n\n\tif(len(np.shape(volume))==2):\tdim_x_dosi, dim_y_dosi, dim_z_dosi = np.shape(dosi)[0], np.shape(dosi)[1], 1\n\n\t#print 'dosi type', dosi.dtype\n\t\n\t# Dealing with image orientation\n\tif(dosi_swapY == True):\n\t\tdosi = np.flip(dosi,1) # flip volume\n\t\torigin_dosi[1] = origin_dosi[1] + dim_y_dosi*spacing_dosi[1]\t\t\n\tif(dosi_swapZ == True):\n\t\tdosi = np.flip(dosi,2) # flip volume\n\t\torigin_dosi[2] = origin_dosi[2] + dim_z_dosi*spacing_dosi[2]\n\tif(dosi_swapY == True)and(dosi_swapZ == True):\n\t\tspacing_dosi[1], spacing_dosi[2] = spacing_dosi[2], spacing_dosi[1]\n\n print ' dosi_swapY, dosi_swapZ :', dosi_swapY, dosi_swapZ\n\n\tdosi_open = True\n\tisodose_show = True\n\tcheck1.select()\n\tUpdate_all()\n\n\tprint(' file successfully opened!')", "def test_read(self):\n self.assertArrayEqual(self.dset['a'], self.data['a'])", "def test_open_file(self):\n\t\tposition, potential = schrodinger.open_file('potential_energy.dat')\n\t\tself.assertEqual(position, [0.0, 1.57079, 3.14159, 4.71238, 6.28318, 7.85398, 9.42477])\n\t\tself.assertEqual(potential, [0.0, 6.0, 0.0, -6.0, 0.0, 6.0, 0.0])", "def readOutputfile(filename, verbose=False):\n\n # -----------------------------------------------------------------------------\n # Defining the classes for data structure\n T_Simulation = namedtuple('Simulation', ['step'])\n T_Step = namedtuple('Step', ['element', 'node'])\n\n T_Displacement = namedtuple('Displacement', ['ux', 'uy'])\n\n T_Element = namedtuple('Element', ['gp', 'avstrain', 'avstress', 'eqstrain'])\n T_GP = namedtuple('GP', ['stress', 'strain'])\n T_Stresses = namedtuple('Stresses', ['xx', 'yy', 'zz', 'yz', 'zx', 'xy'])\n T_Strains = namedtuple('Strains', ['xx', 'yy', 'zz', 'yz', 'zx', 'xy'])\n # -----------------------------------------------------------------------------\n\n nSteps = 0 # Simulation step counter\n\n SimData = T_Simulation(list())\n\n with open(filename) as f:\n line = f.readline() # Read in the first line of the input file\n while True: # Loop over all lines of the input file\n # Read the nodes displacements\n #line = f.readline()\n #print(line)\n if line == 'DofManager output:\\n': # String starts a list of nodes displacement information\n nSteps += 1 # The above string starts a new simulation step\n line = f.readline() # Cancel ---------- seperator\n line = f.readline()\n Nodes = list() # Initialize/clear list of nodes\n\n while line != '\\n' and line != 'Element output:\\n': # Strings that finish the list\n #\t\t\t\tnNode = int(line.strip().split()[1]) # Node id\n line = f.readline()\n dim1 = float(line.strip().split()[3]) # Displacement dim1\n line = f.readline()\n dim2 = float(line.strip().split()[3]) # Displacement dim2\n Nodes.append(\n T_Displacement(dim1, dim2)) # Append displacements of the current node to the node list\n line = f.readline()\n\n\n if verbose:\n print('Step {}: Dofs completed.\\n'.format(nSteps))\n print('---------------------------------\\n')\n\n # Read the stresses an strains at Gauss points\n elif line == 'Element output:\\n': # String starts a list elements, GPs, strains and stresses\n line = f.readline() # Cancel ---------- seperator\n line = f.readline()\n Elements = list() # Initialize/clear list of elements\n\n while line != '\\n' and line != '\\tR E A C T I O N S O U T P U T:\\n': # Strings that finish the list\n #\t\t\t\t\tnElement = line.strip().split()[2] # Element id\n line = f.readline()\n GPs = T_Element(list(), 0, 0, 0) # List of Gauss 
points\n\n while line != '\\n' and line.strip().split()[0] == 'GP': # String that starts a new GP\n #\t\t\t\t\t\tnGP = int(line.strip().split()[1].split('.')[1]) # GP id\n tmp = [float(i) for i in line.strip().split()[4:10]] # Read the strains\n strain = T_Strains(tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5])\n line = f.readline()\n tmp = [float(i) for i in line.strip().split()[1:7]] # Read the stresses\n stress = T_Stresses(tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5])\n GPs.gp.append(\n T_GP(stress, strain)) # Append stresses and strains of the current GP to the GP list\n line = f.readline()\n\n\n Elements.append(GPs) # Append GP list of the current element to the element list\n\n if verbose:\n print('Step {}: GPs completed.\\n'.format(nSteps))\n print('---------------------------------\\n')\n\n SimData.step.append(T_Step(Elements, Nodes)) # Append element and node list of the current step to the step list\n #print('the file input ends')\n #print(nSteps)\n # only needed with a while loop\n # Jump over the lines until we reach the next time step (Caught by if-clause)\n try:\n line = f.readline() # Will generate an error if files end is reached\n if line == \"\":\n raise EOFError\n except:\n if verbose: print(\"End of file reached.\\n\")\n break # Break the 'while True' loop\n\n # -----------------------------------------------------------------------------\n\n\n print('averaging the stress')\n # Averaging of strains and stress of GPs of each element\n for istep in range(len(SimData.step)):\n\n for ielement in range(len(SimData.step[istep].element)):\n print(len)\n # Initialization before each element\n stresses = np.array([0., 0., 0., 0., 0., 0.])\n strains = np.array([0., 0., 0., 0., 0., 0.])\n\n for igp in range(len(SimData.step[istep].element[ielement])):\n print(igp)\n # Add up all data of all GPs\n #stresses[:] += SimData.step[istep].element[ielement].gp[igp].stress[:]\n strains[:] += SimData.step[istep].element[ielement].gp[igp].strain[:]\n\n # Divide GP sum by number of GPs\n stresses /= len(SimData.step[istep].element[ielement])\n strains /= len(SimData.step[istep].element[ielement])\n # Replace the field (initialized with 0) with new information\n SimData.step[istep].element[ielement] = SimData.step[istep].element[ielement]._replace(\n avstress=T_Stresses(stresses[0], stresses[1], stresses[2], stresses[3], stresses[4], stresses[5]))\n SimData.step[istep].element[ielement] = SimData.step[istep].element[ielement]._replace(\n avstrain=T_Strains(strains[0], strains[1], strains[2], strains[3], strains[4], strains[5]))\n print('Analysis finished')\n return SimData", "def test_ordinary(self):\n date = datetime(2016, 11, 12)\n seq = 36\n name = star_barcode.barcode_filename(date, seq)\n self.assertEqual(\n name,\n 'Barcode_2016-W45-6_36.pdf'\n )", "def test_pert_file(self):\n path, case = os.path.split(self.ieee14)\n\n # --- with pert file ---\n ss = andes.run('ieee14.raw', pert='pert.py',\n input_path=path, no_output=True, default_config=True,\n )\n ss.TDS.init()\n self.assertIsNotNone(ss.TDS.callpert)\n\n # --- without pert file ---\n ss = andes.run('ieee14.raw',\n input_path=path, no_output=True, default_config=True,\n )\n ss.TDS.init()\n self.assertIsNone(ss.TDS.callpert)", "def test_conversion_lowlevel(path):\n gff_file = gff.GFFFile.read(join(data_dir(\"sequence\"), path))\n ref_entries = [entry for entry in gff_file]\n\n gff_file = gff.GFFFile()\n for entry in ref_entries:\n gff_file.append(*entry)\n temp = TemporaryFile(\"w+\")\n gff_file.write(temp)\n\n temp.seek(0)\n 
gff_file = gff.GFFFile.read(temp)\n temp.close()\n test_entries = [field for field in gff_file]\n assert test_entries == ref_entries", "def test_fileobj_not_closed(self):\n\n f = open(self.data(\"test0.fits\"), \"rb\")\n _ = fits.getdata(f)\n assert not f.closed\n\n f.seek(0)\n _ = fits.getheader(f)\n assert not f.closed\n\n f.close() # Close it now", "def open_read_write(file_to_open=PROBLEM_FILE):\n\n with open(file_to_open, 'r') as working_file:\n # turns the opened file into a list using a list comprehension\n working_file = [sentence.strip() for sentence in working_file.readlines()]\n\n # iterates through the enumerated file displaying a tuple\n # of (index, string)\n for sentence in enumerate(working_file): #iterates through the file\n if sentence[0] % 2: #Checks to see if the index is divisible by 2\n print(sentence[1]) #prints the string", "def test_derivs(self):\n cr = CaseReader(self.filename)\n last_case = cr.get_case(-1)\n self.assertIsNone(last_case.derivs,\n \"Case erroneously contains derivs.\")", "def testFileInRead(self, mockPath):\n mockPath.return_value = 'bananaphone.ccc'\n self.node = cdl_convert.ColorCollection(input_file='mybestfile.ccc')\n\n mockPath.assert_called_once_with('mybestfile.ccc')\n\n self.assertEqual(\n 'bananaphone.ccc',\n self.node.file_in\n )", "def test_irr_read(irregular_written_data):\n\n fp, written = irregular_written_data\n with openEDF(fp) as reader:\n arr = reader.read(0)\n #imprecision due to 2-byte conversion so tolerance set to 1 unit\n assert np.allclose(written, arr, equal_nan=True, atol=1)", "def assertState(self, files):\n\n for path, expected in files.iteritems():\n fullpath = os.path.join(self._dir, path)\n self.assertFile(fullpath, path)\n with open(fullpath, 'r') as file:\n actual = file.read() \n self.assertEqual(actual, expected)", "def test_make_fna(self):\r\n fna_fp = os.path.join(self.sff_dir, 'test.fna')\r\n fna_gz_fp = os.path.join(self.gz_sff_dir, 'test_gz.fna')\r\n make_fna(self.sff_fp, fna_fp)\r\n make_fna(self.sff_gz_fp, fna_gz_fp)\r\n self.assertEqual(open(fna_fp).read(), fna_txt)\r\n self.assertEqual(open(fna_gz_fp).read(), fna_txt)", "def read_and_test_file_content(self): # pragma: no cover\n\n # We print the CLI header.\n PyFunceble.CLICore.print_header()\n\n with open(self.file, \"r\", encoding=\"utf-8\") as file:\n # We open the file we have to test.\n\n for line in self._get_list_to_of_subjects_to_test_from_file(file):\n # We loop through the file decoded file\n # content.\n\n # We test the line.\n self._test_line(line)\n\n for index, line in self.mining.list_of_mined():\n # We loop through the list of mined domains\n # (if the mining subystem is activated.)\n\n # We test the line.\n self._test_line(line)\n # and remove the currently tested line\n # from the mining database.\n self.mining.remove(index, line)\n\n for subject in self.get_complements():\n # We loop through the list of complements.\n\n # We test the complement.\n self._test_line(subject)\n\n # We inform all subsystem that we are not testing for complements anymore.\n self.complements_test_started = False\n\n # We update the counters\n self.autocontinue.update_counters()\n # We clean the autocontinue subsystem, we finished\n # the test.\n self.autocontinue.clean()\n # We process the autosaving if necessary.\n self.autosave.process(test_completed=True)\n # We close the database connection\n if self.sqlite_db.authorized:\n self.sqlite_db.connection.close()\n if self.mysql_db.authorized:\n self.mysql_db.get_connection().close()", "def 
test_fileOperations(self): \n from DIRAC import gConfig\n testSE = 'testSE'\n #rssClient = ResourceStatusClient()\n #result = rssClient.getStorageElementsList( 'Read' )\n #result = gConfig.getSections( '/Resources/StorageElements' )\n #if result['OK'] and result['Value']:\n # testSE = result['Value'][0]\n #if result['Ok']:\n # testSE = result['Value'][ 0 ] \n \n result = self.fc.addFile( { testFile: { 'PFN': 'testfile', \n 'SE': testSE , \n 'Size':0, \n 'GUID':0, \n 'Checksum':'0' } } )\n self.assert_( result['OK'] )\n if gConfig.getValue( '/Resources/StorageElements/%s/AccessProtocol.1/Host' % testSE, '' ):\n result = self.fc.getReplicas( testFile )\n self.assert_( result['OK'] )\n self.assert_( testFile in result['Value']['Successful'] )", "def test_process_file():\r\n\r\n ## Once you have process_file \"working\", uncomment the 5 lines starting with\r\n ## file1 = \"\"\r\n ## Set kdiffexe to the path to kdiff3.exe on your computer\r\n ## Set file1 and file2 to the original and fixed eversion of the files\r\n ## This should provide a nice visual comparison of the file with and\r\n ## without the fix. In general, this is not a good practice for testing.\r\n ## That is, launching an external applicaiton to show the results.\r\n ##\r\n # file1 = \"\"\r\n # file2 = \"\"\r\n # kdiffexe = '\"C:\\Program Files\\KDiff3\\kdiff3.exe\"'\r\n # cmd = r'{} {} {}'.format(kdiffexe, file1, file2)\r\n # os.system(cmd)\r\n fh.process_file()", "def test_read_input_file(self):\n\n test_max_digit = 2\n tuple1 = self.test_raw_tuple\n tuple2, max_digit = read_input_file(self.test_drug_info_file)\n self.assertEqual(tuple1, tuple2)\n self.assertAlmostEqual(max_digit,test_max_digit)", "def test_can_create_fa_dfl(\n pages: conftest.UserPages, sample_upload_file: types.FilePayload\n) -> None:\n\n with pages.imp_page() as imp_page:\n dfl_id = fa_dfl_create(imp_page, sample_upload_file)\n\n with pages.ilb_page() as ilb_page:\n fa_dfl_manage_and_complete_case(ilb_page, dfl_id)", "def test_file_field():" ]
[ "0.59925705", "0.59771883", "0.59220266", "0.5896933", "0.58921814", "0.58638084", "0.57837987", "0.5661089", "0.56471264", "0.56424177", "0.5633926", "0.56308526", "0.5623441", "0.5612273", "0.56118274", "0.56060433", "0.5597619", "0.55867624", "0.557899", "0.5573398", "0.55718875", "0.55690295", "0.55538565", "0.5505405", "0.5502898", "0.5486591", "0.5483567", "0.54779994", "0.5457131", "0.5438945", "0.54351866", "0.542571", "0.5408695", "0.5378154", "0.5374414", "0.5358395", "0.53453785", "0.5344981", "0.5336678", "0.53346276", "0.53280574", "0.53259873", "0.5317975", "0.5317791", "0.5310719", "0.530913", "0.53090686", "0.53002834", "0.5290266", "0.527335", "0.52580833", "0.52563727", "0.52484953", "0.5241584", "0.5239888", "0.52375776", "0.523584", "0.5234615", "0.5233701", "0.523055", "0.5227521", "0.52264696", "0.522613", "0.52220225", "0.5212241", "0.5208084", "0.52074236", "0.52032375", "0.5198244", "0.519398", "0.5193812", "0.5192464", "0.5180411", "0.5176407", "0.5174323", "0.5172295", "0.5172052", "0.5171112", "0.5168009", "0.51648694", "0.5158724", "0.51578146", "0.5153783", "0.51517904", "0.51502866", "0.51492435", "0.5147022", "0.51268697", "0.5114887", "0.5114586", "0.51124454", "0.51059085", "0.51056796", "0.51006377", "0.50991464", "0.5094975", "0.50931984", "0.5092014", "0.5088746", "0.5086057", "0.50845885" ]
0.0
-1
Performed before reading the config file (the config file is located in args.root). Check and normalize the root path.
def setup_part1(args):
    args.rootarg = args.root
    rootext = os.path.splitext(args.rootarg)[1]
    if rootext == '':
        pass
    else:
        args.root = os.path.dirname(args.root)

    if args.root:
        args.root = os.path.abspath(args.root)
        if not os.path.isdir(args.root):
            if args.gallery:
                os.mkdir(args.root)
            else:
                error('Directory not found', args.root)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_config_file(self):\n\n conf_file = self.args.file\n if conf_file is not None:\n if os.path.isfile(conf_file):\n config_file = open(conf_file, \"r\")\n self.main_file = yaml.load(config_file, Loader=yaml.FullLoader)\n elif os.path.isfile(\n os.path.join(get_path(\"DEFAULT\", \"config_file_path\"), conf_file)\n ):\n fpath = get_path(\"DEFAULT\", \"config_file_path\")\n config_file = open(os.path.join(fpath, conf_file), \"r\")\n self.main_file = yaml.load(config_file, Loader=yaml.FullLoader)\n else:\n self.logger.error(\n colorama.Fore.RED\n + \"ERROR!! Config file '%s' is not present \" % conf_file,\n extra=self.log_detail,\n )\n sys.exit(1)\n else:\n if self.args.hostname and self.args.testfiles:\n temp_dict = {\n \"hosts\": [{\"device\": \"\", \"username\": \"\", \"passwd\": \"\"}],\n \"tests\": [],\n }\n temp_dict[\"hosts\"][0][\"device\"] = self.args.hostname\n temp_dict[\"hosts\"][0][\"username\"] = self.args.login\n temp_dict[\"hosts\"][0][\"passwd\"] = self.args.passwd\n for tfile in self.args.testfiles:\n temp_dict[\"tests\"].append(tfile)\n self.main_file = temp_dict\n\n if (\n self.main_file.__contains__(\"sqlite\")\n and self.main_file[\"sqlite\"]\n and self.main_file[\"sqlite\"][0]\n ):\n self.chk_database(\n self.main_file,\n self.args.pre_snapfile,\n self.args.post_snapfile,\n self.args.check,\n self.args.snap,\n )\n else:\n # if --check option is given for sqlite, then snap file name is not compulsory\n # else exit the function saying arguments not correct\n if self.args.check is True and (\n self.args.pre_snapfile is None or self.args.post_snapfile is None\n ):\n self.logger.error(\n colorama.Fore.RED\n + \"Arguments not given correctly, Please refer help message\",\n extra=self.log_detail,\n )\n self.parser.print_help()\n sys.exit(1)", "def pre_config_root_read(self, resource_id):\n pass", "def test_load_config_safe(self):\n self.__test_load_config_safe(\".scuba.yml\")", "def readConfigFile( self, fileName ):\n try:\n self.analysisBox.readConfig( fileName )\n path, name = os.path.split( fileName )\n self.rsrc.lastFolder = path\n self.console.info( 'Read full config file %s\\n' % fileName )\n except IOError, ValueError:\n self.console.error( 'Error reading full config file %s\\n' % fileName )", "def set_rootdir(configdict, config_file):\n if 'rootdir' not in configdict or not configdict['rootdir']:\n configdict['rootdir'] = os.path.dirname(config_file)", "def validate_paths(self):\n # When we're started with a *.qemuboot.conf arg assume that image\n # artefacts are relative to that file, rather than in whatever\n # directory DEPLOY_DIR_IMAGE in the conf file points to.\n if self.qbconfload:\n imgdir = os.path.realpath(os.path.dirname(self.qemuboot))\n if imgdir != os.path.realpath(self.get('DEPLOY_DIR_IMAGE')):\n logger.info('Setting DEPLOY_DIR_IMAGE to folder containing %s (%s)' % (self.qemuboot, imgdir))\n self.set('DEPLOY_DIR_IMAGE', imgdir)\n\n # If the STAGING_*_NATIVE directories from the config file don't exist\n # and we're in a sourced OE build directory try to extract the paths\n # from `bitbake -e`\n havenative = os.path.exists(self.get('STAGING_DIR_NATIVE')) and \\\n os.path.exists(self.get('STAGING_BINDIR_NATIVE'))\n\n if not havenative:\n if not self.bitbake_e:\n self.load_bitbake_env()\n\n if self.bitbake_e:\n native_vars = ['STAGING_DIR_NATIVE']\n for nv in native_vars:\n s = re.search('^%s=\"(.*)\"' % nv, self.bitbake_e, re.M)\n if s and s.group(1) != self.get(nv):\n logger.info('Overriding conf file setting of %s to %s from Bitbake environment' % 
(nv, s.group(1)))\n self.set(nv, s.group(1))\n else:\n # when we're invoked from a running bitbake instance we won't\n # be able to call `bitbake -e`, then try:\n # - get OE_TMPDIR from environment and guess paths based on it\n # - get OECORE_NATIVE_SYSROOT from environment (for sdk)\n tmpdir = self.get('OE_TMPDIR')\n oecore_native_sysroot = self.get('OECORE_NATIVE_SYSROOT')\n if tmpdir:\n logger.info('Setting STAGING_DIR_NATIVE and STAGING_BINDIR_NATIVE relative to OE_TMPDIR (%s)' % tmpdir)\n hostos, _, _, _, machine = os.uname()\n buildsys = '%s-%s' % (machine, hostos.lower())\n staging_dir_native = '%s/sysroots/%s' % (tmpdir, buildsys)\n self.set('STAGING_DIR_NATIVE', staging_dir_native)\n elif oecore_native_sysroot:\n logger.info('Setting STAGING_DIR_NATIVE to OECORE_NATIVE_SYSROOT (%s)' % oecore_native_sysroot)\n self.set('STAGING_DIR_NATIVE', oecore_native_sysroot)\n if self.get('STAGING_DIR_NATIVE'):\n # we have to assume that STAGING_BINDIR_NATIVE is at usr/bin\n staging_bindir_native = '%s/usr/bin' % self.get('STAGING_DIR_NATIVE')\n logger.info('Setting STAGING_BINDIR_NATIVE to %s' % staging_bindir_native)\n self.set('STAGING_BINDIR_NATIVE', '%s/usr/bin' % self.get('STAGING_DIR_NATIVE'))", "def __read_config(self):\n with open(self.config_file, 'r') as data_file:\n dict = json.load(data_file)\n self.ibooks_doc_root = dict[\"ibooks_doc_root\"]\n self.library_folder = dict[\"library_folder\"]\n self.annotation_folder = dict[\"annotation_folder\"]\n self.tmp_dir = dict[\"tmp_dir\"]", "def init_config():\n config_file = create_config_file.CreateConfigFile()\n config_file.check_if_config_file_exists()", "def use_config_file(self):\n self.config_file = self.find_config_file()\n if self.config_file:\n self.apply_config_file(self.config_file)", "def init():\n for config_filename in _config_utils.CONFIG_FILENAMES:\n if os.path.isfile(config_filename):\n config_filepath = os.path.abspath(config_filename)\n click.echo(\"found existing config file {}\".format(config_filepath))\n return\n\n config_filepath = _config_utils.create_empty_config_file('.')\n click.echo(\"initialized empty config file {}\".format(config_filepath))", "def initialize_paths(self):\n for path in self.config[\"paths\"]:\n self.force_path_to_exist(self.config[\"paths\"][path])", "def _check_config(self):", "def load_config_file(self, suppress_errors=True):\n self.log.debug(\"Searching %s for config files\", self.config_file_paths)\n base_config = 'jupyter_config'\n try:\n super(JupyterApp, self).load_config_file(\n base_config,\n path=self.config_file_paths,\n )\n except ConfigFileNotFound:\n # ignore errors loading parent\n self.log.debug(\"Config file %s not found\", base_config)\n pass\n\n if self.config_file:\n path, config_file_name = os.path.split(self.config_file)\n else:\n path = self.config_file_paths\n config_file_name = self.config_file_name\n\n if not config_file_name or (config_file_name == base_config):\n return\n\n try:\n super(JupyterApp, self).load_config_file(\n config_file_name,\n path=path\n )\n except ConfigFileNotFound:\n self.log.debug(\"Config file not found, skipping: %s\", config_file_name)\n except Exception:\n # Reraise errors for testing purposes, or if set in\n # self.raise_config_file_errors\n if (not suppress_errors) or self.raise_config_file_errors:\n raise\n self.log.warning(\"Error loading config file: %s\" %\n config_file_name, exc_info=True)", "def get_cluster_config_path(root_dir, console_args):\n\n if os.path.isabs(console_args.cluster_config_path):\n return 
console_args.cluster_config_path\n return os.path.join(root_dir, console_args.cluster_config_path)", "def init():\n file_name = 'config.json'\n home_directory_path = str(Path.home())\n config_file_directory = home_directory_path+\"/.config/files/\"\n full_path = config_file_directory + file_name\n\n if os.path.isfile(full_path) and os.access(full_path, os.R_OK): # Readable Config file exists and is valid\n try:\n with open(full_path) as file:\n json_file = json.load(file)\n load_json_and_arguments(json_file)\n\n except ValueError as exception:\n raise ValueError(\"Invalid JSON configuration file\")\n\n elif not os.path.isfile(full_path): # Config file doesn't exist yet, create it\n\n if not os.path.exists(config_file_directory): # Make the directory if that doesn't exist as well\n os.makedirs(config_file_directory)\n\n get_account_info(full_path)\n\n else:\n raise IOError(\"Config file: \" + full_path + \" not accessible\")", "def _init_variables(self, config_root):\n if not config_root and 'EXCONF_CONFIG_ROOT' in os.environ:\n config_root = os.environ['EXCONF_CONFIG_ROOT']\n if not config_root and EXCONF_CONFIG_FILE_NAME in os.listdir('.'):\n config_root = '.'\n if not config_root:\n raise ValueError(\"Exconf configuration root not defined. Give -c option, or define \"\n \"environment variable EXCONF_CONFIG_ROOT.\")\n if not EXCONF_CONFIG_FILE_NAME in os.listdir(config_root):\n raise ValueError(\"No {} file found from configuration root: {}\"\n .format(EXCONF_CONFIG_FILE_NAME, config_root))\n config_root = os.path.abspath(config_root)\n exconf_config_file_path = os.path.join(config_root, EXCONF_CONFIG_FILE_NAME)\n LOG.debug(\"Reading Exconf configuration from: {}\", exconf_config_file_path)\n self.config_vars = read_yaml(exconf_config_file_path)\n self.config_vars[EXCONF_VAR_CONFIG_ROOT] = config_root\n self.resolved_vars = None", "def do_root(self, line):\n self.root_directory = line\n if self.source_file:\n self.source_file = self.root_directory + \"/\" + self.source_file\n print(f\"Root directory to read & write files is: {line}\")", "def __init__(self):\n\n self.root_path = os.path.dirname(os.path.abspath(__file__))[:-5]\n self.config_path = os.path.join(self.root_path, \"files\\\\CONFIG.txt\")\n self.metrics_path = os.path.join(self.root_path, \"files\\\\metrics.txt\")\n\n self.setup_metrics_file()\n\n if self.check_configuration() is False:\n self.setup_configuration_file()", "def config(filename, root_path):\n cnf = _config('/etc/%s' % filename)\n if cnf is False:\n filename = os.path.join(root_path, filename)\n cnf = _config(filename)\n return cnf", "def _loadconfig(self):\n\n # Get the Topology, from the topology layout file\n topo = {}\n with open(self._autoconfig_filename, \"r\") as stream:\n try:\n topo = yaml.load(stream)\n if \"metadata\" in topo:\n self._metadata = topo[\"metadata\"]\n except yaml.YAMLError as exc:\n raise RuntimeError(\n \"Couldn't read the Auto config file {}.\".format(\n self._autoconfig_filename, exc\n )\n )\n\n systemfile = self._rootdir + self._metadata[\"system_config_file\"]\n if self._clean is False and os.path.isfile(systemfile):\n with open(systemfile, \"r\") as sysstream:\n try:\n systopo = yaml.load(sysstream)\n if \"nodes\" in systopo:\n self._nodes = systopo[\"nodes\"]\n except yaml.YAMLError as sysexc:\n raise RuntimeError(\n \"Couldn't read the System config file {}.\".format(\n systemfile, sysexc\n )\n )\n else:\n # Get the nodes from Auto Config\n if \"nodes\" in topo:\n self._nodes = topo[\"nodes\"]\n\n # Set the root directory in all 
the nodes\n for i in self._nodes.items():\n node = i[1]\n node[\"rootdir\"] = self._rootdir", "def modify_base_buildroot(self):\n if \"'%s '\" % self.buildroot_pkgs != pipes.quote(str(self.buildroot_pkgs)+' '):\n # just different test if it contains only alphanumeric characters allowed in packages name\n raise BuilderError(\"Do not try this kind of attack on me\")\n self.root_conn.module_name = \"lineinfile\"\n self.root_conn.module_args = \"\"\"dest=/etc/mock/%s.cfg line=\"config_opts['chroot_setup_cmd'] = 'install @buildsys-build %s'\" regexp=\"^.*chroot_setup_cmd.*$\" \"\"\" % (self.chroot, self.buildroot_pkgs)\n self.mockremote.callback.log('putting %s into minimal buildroot of %s' % (self.buildroot_pkgs, self.chroot))\n results = self.root_conn.run()\n\n is_err, err_results = check_for_ans_error(results, self.hostname, success_codes=[0],\n return_on_error=['stdout', 'stderr'])\n if is_err:\n self.mockremote.callback.log(\"Error: %s\" % err_results)\n myresults = get_ans_results(results, self.hostname)\n self.mockremote.callback.log(\"%s\" % myresults)", "def load_config(self, cfg: ConfigParser):\n log.debug('loading configuration file')\n section = cfg['DEFAULT']\n # use host-specific configuration, if any\n if self.hostname in cfg:\n section = cfg[self.hostname]\n self.path = os.path.abspath(os.path.expanduser(section['repo_dir']))\n self.gpg_key_id = section['gpg_key_id']\n self.ignored_files = section['ignored_files'].split(',')\n self.ignored_files.append('.gitkeep')", "def check_config_file():\n # Locate and init config.\n default_config = \"config.json\"\n if len(sys.argv) == 2:\n # config from command line\n app_config = config_reader(sys.argv[1])\n else:\n # config should be in default\n app_config = config_reader(default_config)\n # fin\n if not app_config:\n print(\"Exiting due to invalid config file.\")\n return False\n # fin\n return app_config", "def _check(self, config: Dict):\n if 'path' not in config:\n raise FileNotFoundError(\"File not found.\")", "def test_no_recognized_root(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"wrong_root.agent_name\", \"value\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The root of the dotted path must be one of: {}\".format(\n ALLOWED_PATH_ROOTS\n )\n )", "def load_config(self):\n pass", "def __check_config(self):\n if not os.path.exists(self.__config_path):\n return False\n else:\n return True", "def setup_config_folder(self):\n\n config_dir = self.config_folder\n \n logging.info(\"using config folder: %s\" % (config_dir))\n if not os.path.isdir(config_dir):\n os.mkdir(config_dir)\n\n try:\n f = open(config_dir + self.wordlist_file,'r')\n except IOError:\n f = open(config_dir + self.wordlist_file,'a+')\n f.close()\n\n try:\n f = open(config_dir + self.score_file,'r')\n except IOError:\n f = open(config_dir + self.score_file,'a+')\n f.close()", "def read_config(self, config_filename):", "def load_settings_from_file(self, cfg_file):\n \n #\n #\n # TODO\n # Missing settings should not cause exceptions\n #\n #\n #\n\n if not os.path.exists(cfg_file): \n raise Exception('Provided config file [%s] does not exist or cannot be read.' 
% cfg_file)\n\n import ConfigParser\n config = ConfigParser.ConfigParser()\n config.read(cfg_file)\n \n \n self.reference_root = config.get('Paths','reference-root')\n \n self.scratch_root = os.getcwd()\n try:\n self.scratch_root = config.get('Paths','scratch-root')\n except ConfigParser.NoOptionError:\n self.logger.info('Scratch-root setting is missing. Using current directory: %s' % self.scratch_root)\n\n\n if (self.run_folder != None):\n self.run_id = os.path.basename(self.run_folder)\n else:\n raise Exception('Set runfolder with PipelineConfig.set_runfolder() before loading settings')\n \n \n #\n # TODO\n # needs to be updated on update of settings\n #\n self.runs_scratch_dir = os.path.join(self.scratch_root, self.run_id) if self.run_folder != None else self.scratch_root\n self.logger.info('Run\\'s scratch directory: %s' % self.runs_scratch_dir)\n \n # optional results and fastq archive dirs \n self.results_archive = None\n try:\n self.results_archive = config.get('Paths','results-archive')\n except ConfigParser.NoOptionError:\n self.logger.info('No results-archive provided. Results will not be archived outside of the run\\'s scratch directory.')\n \n self.fastq_archive = None\n try:\n self.fastq_archive = config.get('Paths','fastq-archive')\n except ConfigParser.NoOptionError:\n self.logger.info('No fastq-archive provided. Fastq files will not be archived outside of the run\\'s scratch directory.')\n \n \n # optional /tmp dir\n self.tmp_dir = '/tmp'\n try:\n self.tmp_dir = config.get('Paths','tmp-dir')\n except ConfigParser.NoOptionError:\n self.logger.info('No tmp-dir provided. /tmp will be used.')\n \n \n \n \n # reference files\n self.reference = os.path.join(self.reference_root, config.get('Resources','reference-genome'))\n self.capture = os.path.join(self.reference_root, config.get('Resources','capture-regions-bed'))\n self.capture_qualimap = os.path.join(self.reference_root, config.get('Resources','capture-regions-bed-for-qualimap'))\n self.capture_plus = os.path.join(self.reference_root, config.get('Resources', 'capture-plus-regions-bed'))\n self.gene_coordinates = os.path.join(self.reference_root, config.get('Resources', 'gene-coordinates'))\n \n self.adapters = os.path.join(self.reference_root, config.get('Resources', 'adapters-fasta'))\n \n # tools\n self.bcl2fastq = config.get('Tools','bcl2fastq')\n self.trimmomatic = config.get('Tools','trimmomatic') \n self.bwa = config.get('Tools','bwa')\n self.samtools = config.get('Tools','samtools')\n self.picard = config.get('Tools','picard')\n self.gatk = config.get('Tools','gatk')\n self.freebayes = config.get('Tools','freebayes')\n self.bcftools = config.get('Tools','bcftools')\n self.qualimap = config.get('Tools','qualimap')\n \tself.fastqc\t = config.get('Tools','fastqc')\n\n\n # annovar settings\n self.convert_to_annovar = os.path.join(config.get('Annovar','annovar_home'), \n config.get('Annovar','convert_to_annovar'))\n self.annovar_annotate = os.path.join(config.get('Annovar','annovar_home'),\n config.get('Annovar','annovar_annotate'))\n self.table_annovar = os.path.join(config.get('Annovar','annovar_home'), \n config.get('Annovar','table_annovar'))\n self.annovar_human_db = os.path.join(config.get('Annovar','annovar_home'),\n config.get('Annovar','annovar_human_db'))\n self.annovar_1000genomes_eur = config.get('Annovar','annovar_1000genomes_eur')\n self.annovar_1000genomes_eur_maf_cutoff = config.get('Annovar','annovar_1000genomes_eur_maf_cutoff')\n self.annovar_inhouse_dbs = config.get('Annovar','annovar_inhouse_dbs')\n 
self.omim_gene_phenotype_map_file = config.get('Annovar','omim_gene_phenotype_map_file')", "def init_config(self):\n with open(self.config_file, 'r') as fh:\n self.config = json.load(fh, object_pairs_hook=OrderedDict)\n logger.info('Config loaded: %s' % os.path.abspath(self.config_file))", "def init_config(self):\n with open(self.config_file, 'r') as fh:\n self.config = json.load(fh, object_pairs_hook=OrderedDict)\n logger.info('Config loaded: %s' % os.path.abspath(self.config_file))", "def parse_config(self):\n # TODO: parse config file\n pass", "def __init__(self, config):\n\n self.root = config.root\n self.pidfile = config.pidfile\n self.log_conf = config.logging", "def test_config_overwrites():\n basepath = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\", '..'))\n temppath = '/tmp/'\n\n conf = core.Config(datapath=temppath)\n\n assert conf.basepath.lower() == basepath.lower()\n assert conf.datapath.lower() == temppath.lower()", "def test_config(self):\n if self.config.get('base_url')[-1] is '/':\n raise exceptions.ScidashClientWrongConfigException('Remove last '\n 'slash '\n 'from base_url')", "def cli(ctx, root):\n try:\n ctx.obj = create_initial_context(root)\n except SettingsBroken as e:\n click.echo(\n 'Failed to read the settings file: %s' % str(e),\n err=True\n )\n exit(1)", "def __init__(self, rootdir, filename, clean=False):\n self._autoconfig_filename = rootdir + filename\n self._rootdir = rootdir\n self._metadata = {}\n self._nodes = {}\n self._vpp_devices_node = {}\n self._hugepage_config = \"\"\n self._clean = clean\n self._loadconfig()\n self._sockfilename = \"\"", "def __init__(self, file_root):\n self.root = file_root", "def __init__(self, cfg_path):\n\t\tself.cfg_path = cfg_path\n\t\tself.cfg_root = self.load_cfg(self.cfg_path)", "def __post_init__(self, *args, **kwargs) -> None:\n super().__post_init__(*args, **kwargs)\n\n self._path = Path(source_file).resolve()\n\n super().__init__(length, length)\n\n if not self._path.exists():\n raise ConfigFileError(\n f\"The source file {self.path} does not exist...\")", "def __init__(self):\n\n self.path = os.path.dirname(os.path.realpath(__file__)) + '/config.ini'\n self.config = configparser.ConfigParser()\n self.config.read(self.path)", "def read( self, filename ):\n conf_file = cfg.ConfigFile(filename)\n self.root_node = conf_file.root_node", "def sanitize_config(config):\n\tif not 'dbfile' in config.keys():\n\t\tconfig['dbfile'] = DEFAULTCONFIG['dbfile']\n\tfor path in ['dbfile']:\n\t\tconfig[path]=os.path.expanduser(config[path])\n\treturn config", "def parse_args(self, argv):\n super(UpdaterDaemon, self).parse_args(argv)\n\n self.stdout = self.options.log_file\n self.stderr = self.options.error_log\n\n config = self.options.config\n if config is None:\n config = os.path.join(os.path.dirname(__file__), 'config.py')\n config = os.path.normpath(os.path.abspath(config))\n configdir, configfile = os.path.split(config)\n configfile, ext = os.path.splitext(configfile)\n if configdir not in sys.path:\n sys.path.insert(0, configdir)\n self.config = __import__(configfile)", "def setUp(self):\n super(LaunchConfigPersonalityTest, self).setUp()\n self.path = '/root/test.txt'", "def _set_config():\n\n\tdebug_msg = \"load default config yaml file\"\n\tlogger.debug(debug_msg)\n\n\tconfig_file_parser(paths.CONFIG_FILE, override_options=True)", "def _load_config(self, args: argparse.Namespace):\n #\n # Load a config, filename may or may-not be provided...\n #\n try:\n self._config = TortugaScriptConfig.load(args.config)\n\n 
except ConfigException as ex:\n print(str(ex))\n sys.exit(0)\n\n #\n # Override the config with any provided argument values\n #\n if args.url:\n self._config.url = args.url\n if args.username:\n self._config.username = args.username\n if args.password:\n self._config.password = args.password\n if args.token:\n self._config.token = args.token\n self._config.verify = args.verify", "def _find_config_root(self) -> str:\n location = [\"apache2.conf\", \"httpd.conf\", \"conf/httpd.conf\"]\n for name in location:\n if os.path.isfile(os.path.join(self.root, name)):\n return os.path.join(self.root, name)\n raise errors.NoInstallationError(\"Could not find configuration root\")", "def __init__(self, config_file):\n \n self.log = logging.getLogger(__name__)\n\n self.parser = ConfigParser.ConfigParser()\n if os.path.exists(config_file) and os.path.isfile(config_file):\n self.parser.read(config_file)\n self.log.debug(\"opened configuration '%s'\" % config_file)\n else:\n raise ConfigError(\"Config file missing\", \"File '%s' doesn't exist.\" % (config_file))\n\n self.config_file = config_file\n self.check_config()", "def test_no_recognized_root(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"wrong_root.agent_name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The root of the dotted path must be one of: {}\".format(\n ALLOWED_PATH_ROOTS\n )\n )", "def checkRequiredConfigs(self):\n containmentFolder = self.getAbsContainmentFolder()\n rootFileName = self.app.config.exhale_args[\"rootFileName\"]\n rootFileTitle = self.app.config.exhale_args[\"rootFileTitle\"]\n doxygenStripFromPath = self.app.config.exhale_args[\"doxygenStripFromPath\"]\n\n # validate that the containmentFolder was created\n assert os.path.isdir(containmentFolder)\n # validate that {containmentFolder}/{rootFileName} was created\n assert os.path.isfile(os.path.join(containmentFolder, rootFileName))\n # validate that the title was included\n with open(os.path.join(containmentFolder, rootFileName), \"r\") as root:\n root_contents = root.read()\n root_heading = \"{0}\\n{1}\".format(\n rootFileTitle,\n exhale.utils.heading_mark(rootFileTitle, exhale.configs.SECTION_HEADING_CHAR)\n )\n assert root_heading in root_contents\n\n # TODO: validate doxygenStripFromPath\n if doxygenStripFromPath: # this is only here to avoid a flake8 fail on a todo\n pass", "def test_read_namespaced_build_config(self):\n pass", "def __setup_config_file_abspath():\n if \"APPDATA\" in os.environ:\n basedir = os.environ[\"APPDATA\"]\n elif \"HOME\" in os.environ:\n basedir = os.environ[\"HOME\"]\n else:\n raise AssertionError(\"APPDATA or HOME env vars must be defined \"\n \"to store config file\")\n abs_dir_path = os.path.join(\n basedir, TestManager.APPDATA_SUBDIRECTORY_NAME)\n os.makedirs(abs_dir_path, exist_ok=True, mode=0o660)\n return os.path.join(abs_dir_path, ConfigManager.CONFIG_FILE_NAME)", "def test_create_config_roots(self):\n with self.override_role():\n self._create_config_root()", "def __init__(self, filename, dirname='~'):\n self.config = configparser.ConfigParser()\n\n expanded_dirname = os.path.expanduser(dirname)\n self.configuration_filename = os.path.join(expanded_dirname, filename)\n if os.path.isfile(self.configuration_filename):\n self.config.read(self.configuration_filename)", "def test_default_config_file_paths(\n config,\n):\n assert \"~/.config/yessssms.conf\" in CONFIG_FILE_PATHS\n assert \"/etc/yessssms.conf\" in CONFIG_FILE_PATHS", "def 
setup_config(self):\n\n try:\n with open('config.json') as f:\n config = json.load(f)\n except FileNotFoundError as e:\n log.debug('{}[*]{} {}'.format(DFbase.LOG_ERROR_COLOR,\n DFbase.LOG_INFO_COLOR, e))\n print('{}[*]{} {}'.format(DFbase.LOG_ERROR_COLOR, \n DFbase.LOG_INFO_COLOR, \n 'Please copy config.json.example to confing.json'))\n return False\n except Exception as e:\n log.debug('{}[*]{} {}'.format(DFbase.LOG_ERROR_COLOR, \n DFbase.LOG_INFO_COLOR, e))\n return False\n\n self.artifacts_path = config['ARTIFACTS']['BASE_PATH'].format(self.container_id)\n self.executable_path = config['ARTIFACTS']['EXECUTABLE_PATH']\n self.executable_path = self.executable_path.replace('BASE_PATH', self.artifacts_path)\n self.diff_files_path = config['ARTIFACTS']['DIFF_FILES_PATH']\n self.diff_files_path = self.diff_files_path.replace('BASE_PATH', self.artifacts_path)\n self.log_journald = (True if config['ARTIFACTS']['LOG_JOURNALD_SERVICE'] == \"TRUE\" else False)\n\n for x_path in [self.artifacts_path, self.executable_path, self.diff_files_path]:\n if not os.path.exists(x_path):\n try:\n os.makedirs(x_path, mode=0o700)\n except Exception as e:\n log.debug('{}[*]{} {}'.format(DFbase.LOG_ERROR_COLOR,\n DFbase.LOG_INFO_COLOR, e))\n print('{}[*]{} {}:{}'.format(DFbase.LOG_ERROR_COLOR,\n DFbase.LOG_INFO_COLOR,\n 'Failed when creating directory', x_path))\n return False\n\n return True", "def init_config(self):\n # self.config.read(self.cnfgfile)\n if not self.config.has_section(VERSION_SECTION):\n self.config.add_section(VERSION_SECTION)", "def pre_config_root_create(self, resource_dict):\n pass", "def setup():\n\tglobal config_parser, config_file\n\tglobal prefix\n\n\tif os.path.islink(sys.argv[0]):\n\t\tlink = os.readlink(sys.argv[0])\n\n\t\tif not os.path.isabs(link):\n\t\t\tlink = os.path.join(os.path.dirname(sys.argv[0]), link)\n\n\t\tprefix = os.path.dirname(os.path.abspath(link))\n\telse:\n\t\tprefix = os.path.dirname(os.path.abspath(sys.argv[0]))\n\n\tconfig_parser = ConfigParser.ConfigParser()\n\tset_defaults()\n\n\tconfig_file = os.path.join (xdg_config_home, \"sushi\", \"nigiri\")\n\n\tif not check_config_file(config_file):\n\t\tprint \"Config file creation failed. Aborting.\"\n\t\treturn\n\n\tread_config_file()", "def AutomagicalSettings(self):\n # Try to find gclient or repo root first.\n if not self.options.no_search:\n self.toplevel_root = gclient_utils.FindGclientRoot(self.checkout_root)\n if self.toplevel_root:\n logging.info('Found .gclient at %s' % self.toplevel_root)\n else:\n self.toplevel_root = gclient_utils.FindFileUpwards(\n os.path.join('..', '.repo'), self.checkout_root)\n if self.toplevel_root:\n logging.info('Found .repo dir at %s'\n % os.path.dirname(self.toplevel_root))\n\n # Parse TRYSERVER_* settings from codereview.settings before falling back\n # on setting self.options.root manually further down. 
Otherwise\n # TRYSERVER_ROOT would never be used in codereview.settings.\n self._GclStyleSettings()\n\n if self.toplevel_root and not self.options.root:\n assert os.path.abspath(self.toplevel_root) == self.toplevel_root\n self.options.root = gclient_utils.PathDifference(self.toplevel_root,\n self.checkout_root)\n else:\n self._GclStyleSettings()", "def onLoadConfig(self, inifile):\n cp = ConfigParser(self.defaults)\n cp.readfp(inifile)\n depth = self.getDepth(cp)\n self.baseurl = urljoin(self.inipath, depth)\n # create child loaders for any other l10n.ini files to be included\n try:\n for title, path in cp.items('includes'):\n # skip default items\n if title in self.defaults:\n continue\n # add child config parser\n self.addChild(title, path, cp)\n except NoSectionError:\n pass\n # try to load the \"dirs\" defined in the \"compare\" section\n try:\n self.dirs.extend(cp.get('compare', 'dirs').split())\n except (NoOptionError, NoSectionError):\n pass\n # try getting a top level compare dir, as used for fennec\n try:\n self.tld = cp.get('compare', 'tld')\n # remove tld from comparison dirs\n if self.tld in self.dirs:\n self.dirs.remove(self.tld)\n except (NoOptionError, NoSectionError):\n self.tld = None\n # try to set \"all_path\" and \"all_url\"\n try:\n self.all_path = cp.get('general', 'all')\n self.all_url = urljoin(self.baseurl, self.all_path)\n except (NoOptionError, NoSectionError):\n self.all_path = None\n self.all_url = None\n return cp", "def setup_local_config(self, file_path):\n try:\n shutil.copy(file_path, os.path.join(self.rundir, const.LOCAL_CONFIG_FILE))\n except OSError as e:\n raise ContainerError(\"Local config file provided errored out: {}\".format(e))", "def _build_config_file_path(cls, filename):\n if os.path.exists(filename):\n return filename\n res = os.path.join(os.path.dirname(__file__), '..', 'config', filename)\n if not os.path.exists(res):\n raise ValueError(\"requested config file %s does not exist!\" % filename)\n return res", "def __init__(self, appname):\n self.exepath = '%s' % (os.path.dirname(os.path.realpath(__file__)))\n self.cnfgfile = '%s/versions.cfg' % self.exepath\n self.static_path = '%s/app/static' % self.exepath\n self.config = ConfigParser.RawConfigParser()", "def read_config_file(self, path):\n config = configparser.ConfigParser(allow_no_value=True, delimiters=('='))\n config.optionxform = str\n\n if path:\n path_conf = os.path.abspath(path)\n Output.print_information(\"Reading configuration file \" + path_conf, True)\n if not os.path.isfile(path_conf):\n raise InvalidConfigException(\"The specified config file doesn't exist.\")\n config.read(path_conf)\n\n else:\n home_directory = os.path.expanduser(\"~\")\n cwd_conf = os.path.abspath(os.path.join(os.getcwd(), \"cryptodetector.conf\"))\n home_conf = os.path.abspath(os.path.join(home_directory, \"cryptodetector.conf\"))\n\n # Does config file exist in current working directory?\n if os.path.isfile(cwd_conf):\n Output.print_information(\"Reading configuration file \" + cwd_conf, True)\n self.options[\"config_file\"] = cwd_conf\n config.read(cwd_conf)\n\n # Does config file exist in home folder ?\n elif os.path.isfile(home_conf):\n Output.print_information(\"Reading configuration file \" + home_conf, True)\n self.options[\"config_file\"] = home_conf\n config.read(home_conf)\n\n else:\n Output.print_information(\"Didn't find any configuration file. 
Expect all \" \\\n + \"parameters from the command line.\", True)\n return\n\n for section in [\"settings\", \"methods\"]:\n if section not in config.sections():\n raise InvalidConfigException(\"Invalid configuration file. [\" \\\n + section + \"] section \" + \"is required.\")\n\n for option in self.options:\n\n if isinstance(self.options[option], list):\n option_value = Options.read_array_option(config, option)\n\n elif isinstance(self.options[option], bool):\n option_value = Options.read_boolean_option(config, \"settings\", option)\n\n else:\n option_value = Options.read_string_option(config, \"settings\", option)\n\n if option_value != None:\n self.options[option] = option_value\n\n for option in [\"methods\", \"packages\"]:\n if config.has_section(option):\n self.options[option] = [item for item, _ in config.items(option)]\n\n if self.options[\"output_existing\"] not in [\"rename\", \"overwrite\", \"skip\"]:\n raise InvalidConfigException(\"Invalid config file. In section [settings] \" \\\n + \"output_existing had invalid value '\" + self.options[\"output_existing\"] \\\n + \"'. Its value must be one of three choices: \" \\\n + \"'rename', 'overwrite', and 'skip'.\")\n\n if not self.options[\"methods\"]:\n raise InvalidConfigException(\"Invalid configuration file. There should be one \" \\\n + \"or more items under the [methods] section.\")\n\n methods = self.options[\"methods\"]\n Options.validate_methods(methods)\n\n for method in methods:\n method_class = Options.available_methods()[method]\n\n if not hasattr(method_class, \"options\"):\n continue\n\n for option in method_class.options:\n if isinstance(method_class.options[option], list):\n option_value = Options.read_array_option(config, \"method:\" \\\n + method + \":\" + option)\n\n elif isinstance(method_class.options[option], bool):\n option_value = Options.read_boolean_option(config, \"method:\" + method, option)\n\n else:\n option_value = Options.read_string_option(config, \"method:\" + method, option)\n\n if option_value != None:\n method_class.options[option] = option_value\n self.options[method + \"_\" + option] = option_value", "def get_base_config(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tcp = self.config_parser\n\t\t# TODO: what happens if a command-line arg was already set?\n\t\t# BEGIN Read from config files\n\t\t# build - details relating to the build\n\t\tself.build['privileged'] = cp.getboolean('build', 'privileged')\n\t\tself.build['base_image'] = cp.get('build', 'base_image')\n\t\tself.build['dotest'] = cp.get('build', 'dotest')\n\t\tself.build['net'] = cp.get('build', 'net')\n\t\t# Take a command-line arg if given, else default.\n\t\tif self.build['conn_module'] is None:\n\t\t\tself.build['conn_module'] = cp.get('build', 'conn_module')\n\t\t# target - the target of the build, ie the container\n\t\tself.target['hostname'] = cp.get('target', 'hostname')\n\t\tself.target['ports'] = cp.get('target', 'ports')\n\t\tself.target['volumes'] = cp.get('target', 'volumes')\n\t\tself.target['volumes_from'] = cp.get('target', 'volumes_from')\n\t\tself.target['name'] = cp.get('target', 'name')\n\t\tself.target['rm'] = cp.getboolean('target', 'rm')\n\t\t# host - the host on which the shutit script is run\n\t\tself.host['add_shutit_to_path'] = cp.getboolean('host', 'add_shutit_to_path')\n\t\tself.host['docker_executable'] = cp.get('host', 'docker_executable')\n\t\tself.host['dns'] = cp.get('host', 'dns')\n\t\tself.host['password'] = cp.get('host', 'password')\n\t\tif 
isinstance(self.host['password'],str):\n\t\t\tshutit_global.shutit_global_object.secret_words_set.add(self.host['password'])\n\t\tself.logfile = cp.get('host', 'logfile')\n\t\tself.host['shutit_module_path'] = cp.get('host', 'shutit_module_path').split(':')\n\n\t\t# repository - information relating to docker repository/registry\n\t\tself.repository['name'] = cp.get('repository', 'name')\n\t\tself.repository['server'] = cp.get('repository', 'server')\n\t\tself.repository['push'] = cp.getboolean('repository', 'push')\n\t\tself.repository['tag'] = cp.getboolean('repository', 'tag')\n\t\tself.repository['export'] = cp.getboolean('repository', 'export')\n\t\tself.repository['save'] = cp.getboolean('repository', 'save')\n\t\tself.repository['suffix_date'] = cp.getboolean('repository', 'suffix_date')\n\t\tself.repository['suffix_format'] = cp.get('repository', 'suffix_format')\n\t\tself.repository['user'] = cp.get('repository', 'user')\n\t\tself.repository['password'] = cp.get('repository', 'password')\n\t\tif isinstance(self.repository['password'],str):\n\t\t\tshutit_global.shutit_global_object.secret_words_set.add(self.repository['password'])\n\t\tself.repository['email'] = cp.get('repository', 'email')\n\t\tself.repository['tag_name'] = cp.get('repository', 'tag_name')\n\t\t# END Read from config files\n\n\t\t# BEGIN tidy configs up\n\t\tif self.target['docker_image'] == '':\n\t\t\tself.target['docker_image'] = self.build['base_image']\n\t\t# END tidy configs up\n\n\t\t# FAILS begins\n\t\t# rm is incompatible with repository actions\n\t\tif self.target['rm'] and (self.repository['tag'] or self.repository['push'] or self.repository['save'] or self.repository['export']): # pragma: no cover\n\t\t\tshutit_global.shutit_global_object.shutit_print(\"Can't have [target]/rm and [repository]/(push/save/export) set to true\")\n\t\t\tshutit_global.shutit_global_object.handle_exit(exit_code=1)\n\t\tif self.target['hostname'] != '' and self.build['net'] != '' and self.build['net'] != 'bridge': # pragma: no cover\n\t\t\tshutit_global.shutit_global_object.shutit_print('\\n\\ntarget/hostname or build/net configs must be blank\\n\\n')\n\t\t\tshutit_global.shutit_global_object.handle_exit(exit_code=1)\n\t\t# FAILS ends", "def _validate_config(self):\n pass", "def init_config():\n try:\n initConfig()\n click.echo(\"Submarine CLI Config initialized\")\n except AttributeError as err:\n click.echo(err)", "def __init__(self, config_file='/etc/sfa/ldap_config.py'):\n\n try:\n execfile(config_file, self.__dict__)\n\n self.config_file = config_file\n # path to configuration data\n self.config_path = os.path.dirname(config_file)\n except IOError:\n raise IOError, \"Could not find or load the configuration file: %s\" \\\n % config_file", "def test_config_from_file(self):\n parser = Parser()\n args = parser.parser.parse_args(['-c'])\n if args.config:\n config = Config()\n config.config_file = \"./config\"\n config.config = test_config\n config.config_from_file()\n self.assertTrue(config.config)\n os.remove(config.config_file)", "def _loadConfig(self):\n self._packRoot = getattr(sys, \"_MEIPASS\", path.abspath(path.dirname(__file__)))\n rootDir = path.abspath(path.join(self._packRoot, path.pardir))\n logger.debug(\"MOTools root dir is: %s\" % rootDir)\n\n metConf = path.join(rootDir, \"met_config\", \"met_config.json\")\n mainConf = path.join(rootDir, \"main_config.json\")\n userConf = path.join(rootDir, \"user_config.json\")\n\n self._confData = {\n \"MET\": {\"path\": metConf, \"config\": {}, \"loaded\": False},\n 
\"MAIN\": {\"path\": mainConf, \"config\": {}, \"loaded\": False},\n \"USER\": {\"path\": userConf, \"config\": {}, \"loaded\": False},\n }\n\n for confGroup in self._confData:\n confFile = self._confData[confGroup][\"path\"]\n logger.debug(\"Loading %s config file\" % confGroup)\n if path.isfile(confFile):\n jsonData = {}\n try:\n with open(confFile, mode=\"r\") as inFile:\n jsonData = json.loads(inFile.read())\n if \"config\" in jsonData:\n self._confData[confGroup][\"config\"] = jsonData[\"config\"]\n self._confData[confGroup][\"loaded\"] = True\n except Exception as e:\n logger.error(\"Failed to parse config JSON data.\")\n logger.error(str(e))\n return False\n else:\n logger.debug(\"No file: %s\" % confFile)\n\n # if not self._confData[\"MAIN\"][\"loaded\"]:\n # logger.error(\"Failed to load minimum configuration file main_config.json.\")\n # raise RuntimeError\n\n return", "def __init__(self):\n super().__init__()\n\n etc_conf_names = ('app.conf', 'app.local.conf')\n conf_paths = [os.path.join(APP_DIR, 'etc', c) for c in etc_conf_names]\n\n user_config_path = os.path.join(\n os.path.expanduser('~'),\n '.config',\n 'url_manager.conf'\n )\n conf_paths.append(user_config_path)\n\n self.read(conf_paths)\n self.set('DEFAULT', 'app_dir', APP_DIR)", "def init_config():\n cf = ConfigParser.ConfigParser()\n try :\n cf.read(CONFIG_FILE)\n except os.errno:\n Log.e(TAG, \"Open configuration file error!\")\n return False\n try :\n SW_CONFIG['7zpath'] = cf.get(\"dir_config\", \"7zpath\")\n SW_CONFIG['sharefolder'] = cf.get(\"dir_config\", \"sharefolder\")\n SW_CONFIG['distpath'] = cf.get(\"dir_config\", \"distpath\")\n SW_CONFIG['sw_version'] = cf.get(\"sw_config\", \"version\")\n SW_CONFIG['startup'] = cf.get(\"sw_config\", \"startup\")\n except ConfigParser.Error:\n Log.e(DEBUG_TAG, \"Config file parse error!\")\n clean()\n return False\n try :\n RUN_CONFIG['backup'] = (cf.get(\"run_config\", \"backup\") == \"True\")\n RUN_CONFIG['pop'] = (cf.get(\"run_config\", \"pop\") == \"True\")\n except ConfigParser.Error:\n Log.e(TAG, \"no run config in config file!\")\n RUN_CONFIG['backup'] = False\n RUN_CONFIG['pop'] = False\n try :\n for option in cf.options(\"hook_config\"): \n HOOK_CONFIG[option] = cf.get(\"hook_config\", option)\n except ConfigParser.Error:\n Log.e(TAG, \"No hook config in config file!\")\n if not os.path.exists(SW_CONFIG['sharefolder']):\n try:\n os.makedirs(SW_CONFIG['sharefolder'])\n except os.error:\n print \"Can't create the local folder:\" + SW_CONFIG['distpath'] + \", Please set another one\"\n clean()\n exit()\n if not os.path.exists(SW_CONFIG['distpath']):\n try:\n os.makedirs(SW_CONFIG['distpath'])\n except os.error:\n print \"Can't create the share folder:\" + SW_CONFIG['sharefolder'] + \" temp directory!\"\n clean()\n os.system(\"pause\")\n exit()\n return True", "def init_config(self):\n pass", "def _load_config():\n\tcfg = configparser.ConfigParser()\n\tcfg.read(os.path.join(get_current_directory(), 'citi.config'))\n\treturn cfg", "def __init__(self, config_path, normalize=False):\n self.config = {}\n _config_dict = {}\n self._config_path = Utils.expand_path(config_path)\n self.update = None\n self.normalize = normalize", "def set_root(self):\n config_dir = os.path.expanduser(\"~/.local/shs\")\n config_file = os.path.join(config_dir, \"shs_gui.cfg\")\n # check the file and create one if it's not there\n if not os.path.isfile(config_file):\n os.makedirs(config_dir)\n open(config_file, 'w').close()\n config = ConfigParser.ConfigParser()\n config.read(config_file)\n # if 
config exists and has needed option\n if config.has_option(\"general\", \"root_dir\"):\n return config.get(\"general\", \"root_dir\")\n # make config\n if not config.has_section(\"general\"):\n config.add_section(\"general\")\n dlg = wx.DirDialog(self, \"Select root directory\")\n if dlg.ShowModal() == wx.ID_OK:\n root_dir = dlg.GetPath()\n config.set(\"general\", \"root_dir\", root_dir)\n else:\n sys.exit(1)\n with open(config_file, 'w') as f:\n config.write(f)\n return root_dir", "def test_load(yaml_config_file):\n config = Config()\n config.load(PATH_FILE_CONFIG)\n assert config.backup_root_directory == yaml_config_file.backup\n assert config.docker_compose_wordpress_project_directory == yaml_config_file.docker_compose_wordpress_project", "def __init__(self, path_to_config_file):\n self.file_path = path_to_config_file", "def test_load_configuration_loads_main_file():\n config.load_configuration(main_configuration_path)\n assert config.get('test.nested.path.value') == 'test value'", "def test_find_config_cur_dir(self, in_tmp_path):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\"image: bosybux\\n\")\n\n path, rel, _ = scuba.config.find_config()\n assert_paths_equal(path, in_tmp_path)\n assert_paths_equal(rel, \"\")", "def initialize(self):\n \n if not readconf.check_file_config(self.config):\n return False\n\n # access fields from the .ini file as named properties in self.config, e.g.\n # an .ini field named host is accessed as self.config['host']\n\n self.pointer=utils.read_file_ptr(self.config)\n \n return True", "def __init__(self):\n self.filename = pathlib.Path(__file__).parent.absolute().__str__() + '/../../data/config.ini'\n self.data = ConfigParser()\n self.data.read(self.filename)", "def setup_config(self, args=None):\n self.config_parse(args=args)", "def test_find_config_parent_dir(self, in_tmp_path):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\"image: bosybux\\n\")\n\n os.mkdir(\"subdir\")\n os.chdir(\"subdir\")\n\n # Verify our current working dir\n assert_paths_equal(os.getcwd(), in_tmp_path.joinpath(\"subdir\"))\n\n path, rel, _ = scuba.config.find_config()\n assert_paths_equal(path, in_tmp_path)\n assert_paths_equal(rel, \"subdir\")", "def __init__(self, rootPath=None):\n self.rootPath = rootPath or '.'", "def config():\n global base_dir, log_path\n\n # Set paths\n base_dir = os.path.dirname(os.path.realpath(__file__))\n cfg.path = base_dir + '/config.json'\n log_path = base_dir + '/log.log'\n\n # Start logging\n logging.basicConfig(filename=log_path, format='%(asctime)-16s | %(levelname)-5s | %(message)s', level=logging.DEBUG)\n sys.excepthook = _excepthook\n\n # Load configuration\n cfg.load()\n logging.info('Loaded configuration')\n\n # Print configuration and check if is complete\n cfg.print()\n if not cfg.check:\n logging.info('Exiting...')\n sys.exit(1)", "def config_file_setup(logger, cf_label, cf_from_cli=None):\n presta_config_dir = os.path.join(user_config_dir(__appname__))\n config_file_from_home = os.path.join(presta_config_dir, cf_label)\n\n if not path_exists(config_file_from_home, logger, force=False):\n logger.info('Creating config path {}'.format(presta_config_dir))\n ensure_dir(presta_config_dir)\n config_file_path = '/'.join(['config', cf_label])\n config_file_from_package = resource_filename(__appname__,\n config_file_path)\n copyfile(config_file_from_package, config_file_from_home)\n\n config_file_paths = []\n if cf_from_cli and path_exists(cf_from_cli, logger, force=False):\n config_file_paths.append(WeightedPath(cf_from_cli, 0))\n if 
path_exists(config_file_from_home, logger, force=False):\n config_file_paths.append(WeightedPath(config_file_from_home, 1))\n\n logger.debug(\"config file paths: {}\".format(config_file_paths))\n\n config_file_path = sorted(config_file_paths)[0].path\n logger.info('Reading configuration from {}'.format(config_file_path))\n return config_file_path", "def check_arg_path(self, p):\n if p.endswith('.qemuboot.conf'):\n self.qemuboot = p\n self.qbconfload = True\n elif re.search('\\.bin$', p) or re.search('bzImage', p) or \\\n re.search('zImage', p) or re.search('vmlinux', p) or \\\n re.search('fitImage', p) or re.search('uImage', p):\n self.kernel = p\n elif os.path.exists(p) and (not os.path.isdir(p)) and '-image-' in os.path.basename(p):\n self.rootfs = p\n # Check filename against self.fstypes can hanlde <file>.cpio.gz,\n # otherwise, its type would be \"gz\", which is incorrect.\n fst = \"\"\n for t in self.fstypes:\n if p.endswith(t):\n fst = t\n break\n if not fst:\n m = re.search('.*\\.(.*)$', self.rootfs)\n if m:\n fst = m.group(1)\n if fst:\n self.check_arg_fstype(fst)\n qb = re.sub('\\.' + fst + \"$\", '', self.rootfs)\n qb = '%s%s' % (re.sub('\\.rootfs$', '', qb), '.qemuboot.conf')\n if os.path.exists(qb):\n self.qemuboot = qb\n self.qbconfload = True\n else:\n logger.warn(\"%s doesn't exist\" % qb)\n else:\n raise Exception(\"Can't find FSTYPE from: %s\" % p)\n\n elif os.path.isdir(p) or re.search(':', p) and re.search('/', p):\n if self.is_deploy_dir_image(p):\n logger.info('DEPLOY_DIR_IMAGE: %s' % p)\n self.set(\"DEPLOY_DIR_IMAGE\", p)\n else:\n logger.info(\"Assuming %s is an nfs rootfs\" % p)\n self.check_arg_nfs(p)\n elif os.path.basename(p).startswith('ovmf'):\n self.ovmf_bios.append(p)\n else:\n raise Exception(\"Unknown path arg %s\" % p)", "def setUp(self):\n self._wiki = None\n self._app = None\n self.rootdir = mkdtemp()\n self.create_file(u'config.py', self.config_content)", "def get_hosts(self):\n self.logger.debug(colorama.Fore.BLUE +\n \"jsnapy.cfg file location used : %s\" %\n get_config_location(), extra=self.log_detail)\n self.logger.debug(colorama.Fore.BLUE +\n \"Configuration file location used : %s\" %\n get_path('DEFAULT', 'config_file_path'), extra=self.log_detail)\n \n if self.args.pre_snapfile is not None:\n output_file = self.args.pre_snapfile\n elif self.args.snapcheck is True and self.args.pre_snapfile is None:\n output_file = \"snap_temp\"\n self.snap_del = True\n else:\n output_file = \"\"\n conf_file = self.args.file\n check = self.args.check\n snap = self.args.snap\n if conf_file is not None:\n if os.path.isfile(conf_file):\n config_file = open(conf_file, 'r')\n self.main_file = yaml.load(config_file)\n elif os.path.isfile(os.path.join(get_path('DEFAULT', 'config_file_path'), conf_file)):\n fpath = get_path('DEFAULT', 'config_file_path')\n config_file = open(os.path.join(fpath, conf_file), 'r')\n self.main_file = yaml.load(config_file)\n else:\n self.logger.error(\n colorama.Fore.RED +\n \"ERROR!! 
Config file '%s' is not present \" %\n conf_file, extra=self.log_detail)\n sys.exit(1)\n else:\n if self.args.hostname and self.args.testfiles:\n temp_dict = {'hosts':[{'device':'', 'username':'', 'passwd':''}], 'tests':[]}\n temp_dict['hosts'][0]['device'] = self.args.hostname\n temp_dict['hosts'][0]['username'] = self.args.login\n temp_dict['hosts'][0]['passwd'] = self.args.passwd\n for tfile in self.args.testfiles:\n temp_dict['tests'].append(tfile)\n self.main_file = temp_dict\n\n\n #### if --check option is given for sqlite, then snap file name is not compulsory ####\n #### else exit the function saying arguments not correct ####\n if self.main_file.__contains__(\n 'sqlite') and self.main_file['sqlite'] and self.main_file['sqlite'][0]:\n self.chk_database(\n self.main_file,\n self.args.pre_snapfile,\n self.args.post_snapfile,\n check,\n snap)\n else:\n if (self.args.check is True and (\n self.args.file is None or self.args.pre_snapfile is None or self.args.post_snapfile is None)):\n self.logger.error(colorama.Fore.RED +\n \"Arguments not given correctly, Please refer help message\",\n extra=self.log_detail)\n self.parser.print_help()\n sys.exit(1)\n self.login(output_file)", "def _real_paths(config):\n for key in ('--config', '--ffmpeg-bin', '--log', '--music-source', '--working-dir'):\n if not config[key]:\n continue\n config[key] = os.path.realpath(os.path.expanduser(config[key]))", "def pre_process_information(self):\n self.logger.debug(\n colorama.Fore.BLUE\n + \"jsnapy.cfg file location used : %s\" % get_config_location(),\n extra=self.log_detail,\n )\n self.logger.debug(\n colorama.Fore.BLUE\n + \"Configuration file location used : %s\"\n % get_path(\"DEFAULT\", \"config_file_path\"),\n extra=self.log_detail,\n )", "def __init__(self, root_path):\r\n self.root_path = root_path\r\n if not os.path.exists(root_path):\r\n os.makedirs(root_path)", "def readConfigFile(self):\n self.config_obj = ConfigParser.ConfigParser()\n self.config_obj.readfp(open(self.configfile))\n\n # Set the log file\n if (not self.args_obj.log_file and self.config_obj.has_option('DEFAULT','logfile')):\n self.logfile = self.config_obj.get('DEFAULT', 'logfile')\n\n # Set the baud rate\n if (not self.args_obj.baud_rate and self.config_obj.has_option('DEFAULT','baud')):\n self.baudrate = self.config_obj.get('DEFAULT', 'baud')\n\n # Set the device port \n if (not self.args_obj.device and self.config_obj.has_option('DEFAULT','device')):\n self.device = self.config_obj.get('DEFAULT', 'device')\n\n # Set the connection timeout\n if (not self.args_obj.timeout and self.config_obj.has_option('DEFAULT','timeout')):\n self.timeout = self.config_obj.get('DEFAULT','timeout')\n\n if DEBUG:\n print('(DEBUG) Config Options:')\n self.pp.pprint(self.config_obj.sections())", "def __init__(self, config_path=None):\n config_path = config_path or CONF.api_paste_config\n if os.path.exists(config_path):\n self.config_path = config_path\n else:\n self.config_path = CONF.find_file(config_path)", "def _prep_test(self):\n if not os.path.isfile(self.confpath):\n LOGGER.error(\"Conf file not valid: %s\", self.confpath)\n if not os.path.isfile(self.testlist):\n LOGGER.error(\"testlist file not valid: %s\", self.testlist)", "def test_ignore_non_configs_from_current_dir(tmp_path: pathlib.Path) -> None:\n\n cli.startup(tmp_path)\n\n junk_config = tmp_path / \"myconfig.psd\"\n junk_config.touch()\n conf = tmp_path / \"watmyconfig.json\"\n conf.touch()\n configs_found = in_dir(tmp_path)\n assert len(configs_found) == 1", "def _init_config(self, 
configPath=None):\n # TODO: The SafeConfigParser class has been renamed to ConfigParser in Python 3.2.\n # This alias will be removed in future versions.\n # We still use SafeConfigParser for backwards compatibility with Python 2.\n self.config = SafeConfigParser()\n # Make option names case sensitive\n self.config.optionxform = str\n\n if configPath and os.path.isdir(configPath):\n configDir = configPath\n else:\n configDir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'conf')\n\n # List filenames in configDir alphabetically\n _, _, configFiles = next(os.walk(configDir))\n configFiles = sorted(configFiles, key=str.lower)\n\n # Read configuration pipeline\n for f in configFiles:\n with open(os.path.join(configDir, f)) as configFile:\n self.config.readfp(configFile)\n self._store_config_pass()\n\n if configPath and os.path.isfile(configPath):\n self.config.read(configPath)\n self._store_config_pass()\n\n appSection = 'application'\n self.appName = self._get_option_value(appSection, 'appName')\n self.appResource = self._get_option_value(appSection, 'appResource')\n self.appArgs = []\n appArgs = self._get_option_value(appSection, 'appArgs')\n if appArgs:\n self.appArgs = appArgs.split(' ')\n self.mainClass = self._get_option_value(appSection, 'mainClass')" ]
[ "0.6453089", "0.63893116", "0.62628293", "0.6203292", "0.61769146", "0.6169113", "0.6103033", "0.6092019", "0.60891813", "0.6075031", "0.6063229", "0.60553133", "0.60498255", "0.6032961", "0.60153127", "0.5980272", "0.59365386", "0.59269124", "0.5921976", "0.5921835", "0.591809", "0.59162027", "0.59065837", "0.5898748", "0.58953357", "0.5892111", "0.5878654", "0.58723736", "0.58646935", "0.58607477", "0.5845702", "0.5845702", "0.583296", "0.5829095", "0.5826809", "0.5812458", "0.58108616", "0.58080935", "0.57959586", "0.5793713", "0.577614", "0.57749647", "0.5773486", "0.57538927", "0.5753366", "0.57519746", "0.5750386", "0.57502496", "0.57470083", "0.57412636", "0.5732899", "0.57307297", "0.57282424", "0.5722484", "0.57167464", "0.5702766", "0.5702544", "0.56848586", "0.56840664", "0.5680867", "0.56789476", "0.56776834", "0.5673542", "0.5672469", "0.5671791", "0.5669366", "0.5666052", "0.5650121", "0.5649162", "0.56490344", "0.56476814", "0.5646387", "0.5641404", "0.56412005", "0.5640579", "0.5634524", "0.5625793", "0.56227475", "0.56121373", "0.56093526", "0.5607996", "0.56072307", "0.5605271", "0.56050104", "0.55888665", "0.5587677", "0.55847996", "0.5583773", "0.55809546", "0.5573635", "0.5564399", "0.5560984", "0.55600554", "0.5559736", "0.5557547", "0.55558634", "0.5555597", "0.55537426", "0.55531025", "0.55476344", "0.55436933" ]
0.0
-1
Performed after reading the config file. Check for ffmpeg in the path. Create the .thumbnails directory if necessary and create a .nomedia file in it. Copy the photobox files to the destination directory. Handle priority between the command line and the config file.
def setup_part2(args):
    if args.update:
        args.sourcedir = args.source.sourcedir
        args.bydir = args.source.bydir
        args.bydate = args.source.bydate
        args.diary = args.source.diary
        args.recursive = args.source.recursive
        args.dates = args.source.dates
        args.github_pages = args.source.github_pages
    elif args.gallery:
        args.source.sourcedir = args.sourcedir
        args.source.bydir = args.bydir
        args.source.bydate = args.bydate
        args.source.diary = args.diary
        args.source.recursive = args.recursive
        args.source.dates = args.dates
        args.source.github_pages = args.github_pages
        update_config(args)

    if args.github_pages:
        args.html_suffix = '.html'
    else:
        args.html_suffix = '.htm'

    rootext = os.path.splitext(args.rootarg)[1]
    if rootext:
        args.rootname = os.path.basename(args.rootarg)
    else:
        args.rootname = 'index' + args.html_suffix

    if args.sourcedir:
        args.sourcedir = os.path.abspath(args.sourcedir)
        if os.path.splitdrive(args.sourcedir)[0]:
            drive, rest = os.path.splitdrive(args.sourcedir)
            args.sourcedir = drive.upper() + rest
        if not os.path.isdir(args.sourcedir):
            error('Directory not found', args.sourcedir)
    else:
        if args.gallery and args.diary is False and args.update is None:
            error('Directory not found', 'Use --sourcedir')

    if args.dest:
        args.dest = os.path.abspath(args.dest)

    if args.dest is None:
        args.dest = args.root

    if args.blogger and args.urlblogger is None:
        error('No blogger url (--url)')

    if args.gallery or args.update:
        # check for ffmpeg and ffprobe in path
        for exe in ('ffmpeg', 'ffprobe'):
            try:
                check_output([exe, '-version'])
            except FileNotFoundError:
                error('File not found', exe)

        if args.github_pages:
            args.thumbrep = 'thumbnails'
        else:
            args.thumbrep = '.thumbnails'

        args.thumbdir = os.path.join(args.dest, args.thumbrep)
        if not os.path.exists(args.thumbdir):
            os.mkdir(args.thumbdir)
            open(os.path.join(args.thumbdir, '.nomedia'), 'a').close()

        favicondst = os.path.join(args.dest, 'favicon.ico')
        if not os.path.isfile(favicondst):
            faviconsrc = os.path.join(os.path.dirname(__file__), 'favicon.ico')
            shutil.copyfile(faviconsrc, favicondst)

        photoboxdir = os.path.join(args.dest, 'photobox')
        if not os.path.exists(photoboxdir):
            photoboxsrc = os.path.join(os.path.dirname(__file__), 'photobox')
            shutil.copytree(photoboxsrc, photoboxdir)

    if args.dates:
        if not (args.gallery or args.create):
            # silently ignored for the moment, otherwise all other commands will
            # launch a warning or an error on the default --dates value
            pass
        if args.dates == 'source':
            pass
        elif args.dates == 'diary':
            if args.create:
                error('Incorrect date format', args.dates)
        elif re.match(r'\d+-\d+', args.dates):
            date1, date2 = args.dates.split('-')
            if validate_date(date1) and validate_date(date2):
                args.dates = date1, date2
            else:
                error('Incorrect date format', args.dates)
        else:
            error('Incorrect date format', args.dates)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def take_one_shot(path_to_images, name_image, video_source=\"/dev/video0\"):\n subprocess_cmd(\"ffmpeg -f video4linux2 -s 1280x720 -i {} -frames 1 ./{}/{} -loglevel error -nostats\".format(video_source, path_to_images, name_image))", "def makeVideo():\n os.system(\"cd video && ffmpeg -r 10 -i img%05d.jpg -vcodec mpeg4 -y caronthehill_clip.mp4\")", "def thumbnail(self, fnameIn, fnameOut):\n cmd = \"convert -define jpeg:size=500x150 \"\n cmd += '\"%s\" ' % os.path.join(self.downloadFolder, fnameIn)\n cmd += \"-auto-orient -thumbnail 250x150 \"\n cmd += '\"%s\" ' % os.path.join(self.thumbnailFolder, fnameOut)\n self.log(\"creating thumbnail ...\")\n self.log(cmd)\n process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n process.wait()", "def make_video(pattern, plotdir, moviedir, movienametag):\n images_list = glob('%s/%s'%(plotdir, pattern))\n images_list.sort()\n # save all required files into tmp_moviedir, with simple filenames: %.4d.png\n tmp_moviedir = '%s/tmp_movie_%s'%(plotdir, movienametag)\n os.system('mkdir -p %s'%tmp_moviedir)\n for i in range(len(images_list)):\n fname = images_list[i].split('%s/'%plotdir)[-1].split('.png')[0]\n os.system('cp %s/%s.png %s/%.4d.png'%(plotdir, fname, tmp_moviedir, i))\n\n os.system('avconv -i %s'%tmp_moviedir +'/%04d.png ' \\\n +' -y -c:v libx264 -pix_fmt yuv420p %s/%s.mp4'%(moviedir, movienametag))", "def generate_thumbnail():\n import tempfile\n import glob\n from anima.env import mayaEnv\n m_env = mayaEnv.Maya()\n v = m_env.get_current_version()\n\n if not v:\n return\n\n # do not generate a thumbnail from a Repr\n if '@' in v.take_name:\n return\n\n task = v.task\n project = task.project\n # repo = project.repository\n imf = project.image_format\n width = int(imf.width * 0.5)\n height = int(imf.height * 0.5)\n\n temp_output = tempfile.mktemp()\n\n current_frame = pm.currentTime(q=1)\n output_file = pm.playblast(\n fmt='image',\n startTime=current_frame,\n endTime=current_frame,\n sequenceTime=1,\n forceOverwrite=1,\n filename=temp_output,\n clearCache=1,\n showOrnaments=1,\n percent=100,\n wh=(width, height),\n offScreen=1,\n viewer=0,\n compression='PNG',\n quality=70,\n framePadding=0\n )\n pm.currentTime(current_frame)\n\n output_file = output_file.replace('####', '*')\n found_output_file = glob.glob(output_file)\n if found_output_file:\n output_file = found_output_file[0]\n\n from anima.ui import utils\n utils.upload_thumbnail(task, output_file)\n\n return found_output_file", "def main(base_dir: str, output_dir: str) -> None:\n base_path = pathlib.Path(base_dir)\n output_path = pathlib.Path(output_dir).expanduser()\n\n stage_copy_images(base_path, output_path)\n stage_extract_videos(base_path, output_path)", "def copy_file_to_server():\r\n utils.system_output('mv /home/chronos/user/Downloads/* /usr/local/autotest/results/default/',ignore_status=True)\r\n logging.info(\"Video Copied to Log location\")", "def main():\n destination = Path(argv[1])\n source_files = destination.glob(\"**/*.wma\")\n for file in source_files:\n new_name = file.name.rsplit(\".\", maxsplit=1)[0] + \".flac\"\n dest = str(file.parent / new_name)\n cmd = list(map(str, [\"avconv\", \"-i\", file, dest]))\n if platform == \"win32\":\n print(\"Running on windows... 
on Unix I'd run the following command:\")\n print(cmd)\n else:\n that = Popen(cmd)\n that.wait()", "def copyMedia(source, target):\n if not os.path.exists(target):\n print(\"copying source,target:\", source, target)\n shutil.copy2(source, target)", "def prep(path,date,image):\n \n # run bash code with 'Popen'\n P = Popen('cp '+path+date+'/final/'+image+' ./', shell=True)\n P.wait()\n P = Popen('mv '+image+' '+image+'.fz', shell=True)\n P.wait()\n P = Popen('funpack *.fz', shell=True)\n P.wait()\n P = Popen('rm -rf *.fz', shell=True)\n P.wait()", "def process_video(proc_state):\n entry = proc_state.entry\n workbench = proc_state.workbench\n video_config = mgg.global_config['media_type:mediagoblin.media_types.video']\n\n queued_filepath = entry.queued_media_file\n queued_filename = proc_state.get_queued_filename()\n name_builder = FilenameBuilder(queued_filename)\n\n medium_filepath = create_pub_filepath(\n entry, name_builder.fill('{basename}-640p.webm'))\n\n thumbnail_filepath = create_pub_filepath(\n entry, name_builder.fill('{basename}.thumbnail.jpg'))\n\n # Create a temporary file for the video destination (cleaned up with workbench)\n tmp_dst = NamedTemporaryFile(dir=workbench.dir, delete=False)\n with tmp_dst:\n # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square\n progress_callback = ProgressCallback(entry)\n\n dimensions = (\n mgg.global_config['media:medium']['max_width'],\n mgg.global_config['media:medium']['max_height'])\n\n # Extract metadata and keep a record of it\n metadata = transcoders.VideoTranscoder().discover(queued_filename)\n store_metadata(entry, metadata)\n\n # Figure out whether or not we need to transcode this video or\n # if we can skip it\n if skip_transcode(metadata):\n _log.debug('Skipping transcoding')\n\n dst_dimensions = metadata['videowidth'], metadata['videoheight']\n\n # Push original file to public storage\n _log.debug('Saving original...')\n proc_state.copy_original(queued_filepath[-1])\n\n did_transcode = False\n else:\n transcoder = transcoders.VideoTranscoder()\n\n transcoder.transcode(queued_filename, tmp_dst.name,\n vp8_quality=video_config['vp8_quality'],\n vp8_threads=video_config['vp8_threads'],\n vorbis_quality=video_config['vorbis_quality'],\n progress_callback=progress_callback,\n dimensions=dimensions)\n\n dst_dimensions = transcoder.dst_data.videowidth,\\\n transcoder.dst_data.videoheight\n\n # Push transcoded video to public storage\n _log.debug('Saving medium...')\n mgg.public_store.copy_local_to_storage(tmp_dst.name, medium_filepath)\n _log.debug('Saved medium')\n\n entry.media_files['webm_640'] = medium_filepath\n\n did_transcode = True\n\n # Save the width and height of the transcoded video\n entry.media_data_init(\n width=dst_dimensions[0],\n height=dst_dimensions[1])\n\n # Temporary file for the video thumbnail (cleaned up with workbench)\n tmp_thumb = NamedTemporaryFile(dir=workbench.dir, suffix='.jpg', delete=False)\n\n with tmp_thumb:\n # Create a thumbnail.jpg that fits in a 180x180 square\n transcoders.VideoThumbnailerMarkII(\n queued_filename,\n tmp_thumb.name,\n 180)\n\n # Push the thumbnail to public storage\n _log.debug('Saving thumbnail...')\n mgg.public_store.copy_local_to_storage(tmp_thumb.name, thumbnail_filepath)\n entry.media_files['thumb'] = thumbnail_filepath\n\n # save the original... 
but only if we did a transcoding\n # (if we skipped transcoding and just kept the original anyway as the main\n # media, then why would we save the original twice?)\n if video_config['keep_original'] and did_transcode:\n # Push original file to public storage\n _log.debug('Saving original...')\n proc_state.copy_original(queued_filepath[-1])\n\n # Remove queued media file from storage and database\n proc_state.delete_queue_file()", "def prepare_video(path_to_video: str, number_of_images=87) -> None:\n\n temp_video = path.join(path_to_video, 'temp_outpy.mp4')\n video = path.join(path_to_video, 'outpy.h264')\n\n # create mp4 video for metadata and compute video duration\n subprocess.run(['ffmpeg', '-i', video, '-c', 'copy', temp_video])\n result = subprocess.run([\"ffprobe\", \"-v\", \"error\", \"-show_entries\",\n \"format=duration\", \"-of\",\n \"default=noprint_wrappers=1:nokey=1\", temp_video],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n video_duration = float(result.stdout)\n\n # create images folder\n path_to_images = path.join(path_to_video, 'images')\n if path.exists(path_to_images) and path.isdir(path_to_images):\n shutil.rmtree(path_to_images)\n makedirs(path_to_images)\n\n # split the given video into images\n subprocess.run(['ffmpeg', '-i', temp_video, '-r', str(number_of_images / video_duration), '-f', 'image2',\n path.join(path_to_images, 'image%d.jpg')])\n\n # remove extra files\n remove_extra_images(path_to_images, number_of_images)\n remove(temp_video)", "def make_video(input_files, width=0, height=0, frame_rate=24, crf=20, output_path=\"video.mp4\"):\n if isinstance(input_files, list):\n from PIL import Image # pylint: disable=C0415\n\n with Image.open(input_files[0]) as img:\n width, height = img.size\n tmp_dir = \"tmp_ffmpeg_dir\"\n os.mkdir(tmp_dir)\n if width % 2 != 0:\n print(f\"Width ({width}) not divisible by 2\")\n width -= 1\n if height % 2 != 0:\n print(f\"Height ({width}) not divisible by 2\")\n height -= 1\n for i, inp in enumerate(input_files):\n shutil.copy(inp, os.path.join(tmp_dir, f\"{i:06d}.png\"))\n inputs = f\"{tmp_dir}/%06d.png\"\n command = ffmpeg_common_args(frame_rate, inputs, width, height, crf, output_path)\n ret = os.system(command)\n assert ret == 0, \"ffmpeg failed to generate video file.\"\n for i in range(len(input_files)):\n os.remove(os.path.join(tmp_dir, f\"{i:06d}.png\"))\n os.rmdir(tmp_dir)\n elif isinstance(input_files, str):\n assert width != 0 and height != 0\n command = ffmpeg_common_args(frame_rate, input_files, width, height, crf, output_path)\n ret = os.system(command)\n assert ret == 0, \"ffmpeg failed to generate video file.\"\n else:\n assert (\n False\n ), f'input_files should be list (of files) or str (of file template, e.g., \"%04d.png\") instead of {type(input_files)}'", "def createThumbnail(self, useCursorPosition=False, dbPath = None, versionInt = None):\n\n return \"\"\n # logger.debug(\"Func: createThumbnail\")\n # projectPath = self.projectDir\n # if useCursorPosition:\n # versionInt = self.currentVersionIndex\n # dbPath = self.currentDatabasePath\n # else:\n # if not dbPath or not versionInt:\n # msg = \"Both dbPath and version must be defined if useCursorPosition=False\"\n # raise Exception ([360, msg])\n #\n # versionStr = \"v%s\" % (str(versionInt).zfill(3))\n # dbDir, shotNameWithExt = os.path.split(dbPath)\n # shotName = os.path.splitext(shotNameWithExt)[0]\n #\n # thumbPath = \"{0}_{1}_thumb.jpg\".format(os.path.join(dbDir, shotName), versionStr)\n # relThumbPath = os.path.relpath(thumbPath, 
projectPath)\n #\n # # create a thumbnail using playblast\n # thumbDir = os.path.split(thumbPath)[0]\n # if os.path.exists(thumbDir):\n # # frame = pm.currentTime(query=True)\n # frame = cmds.currentTime(query=True)\n # # store = pm.getAttr(\"defaultRenderGlobals.imageFormat\")\n # store = cmds.getAttr(\"defaultRenderGlobals.imageFormat\")\n # # pm.setAttr(\"defaultRenderGlobals.imageFormat\", 8) # This is the value for jpeg\n # cmds.setAttr(\"defaultRenderGlobals.imageFormat\", 8) # This is the value for jpeg\n # # pm.playblast(completeFilename=thumbPath, forceOverwrite=True, format='image', width=221, height=124, showOrnaments=False, frame=[frame], viewer=False, percent=100)\n # cmds.playblast(completeFilename=thumbPath, forceOverwrite=True, format='image', width=221, height=124, showOrnaments=False, frame=[frame], viewer=False, percent=100)\n # # pm.setAttr(\"defaultRenderGlobals.imageFormat\", store) #take it back\n # cmds.setAttr(\"defaultRenderGlobals.imageFormat\", store) #take it back\n # else:\n # # pm.warning(\"something went wrong with thumbnail. Skipping thumbnail\")\n # cmds.warning(\"something went wrong with thumbnail. Skipping thumbnail\")\n # return \"\"\n # # return thumbPath\n # return relThumbPath", "def write_thumbnails(self, appstruct):\n slugser = slugify(appstruct[\"serial\"])\n pdf_filename = \"thumbnails/%s/uploaded.pdf\" % slugser\n top_file = \"thumbnails/%s/top.png\" % slugser\n mos_file = \"thumbnails/%s/mosaic.png\" % slugser\n \n thumg = ThumbnailGenerator(pdf_filename)\n self.save_blob(thumg.top_thumbnail(), top_file)\n self.save_blob(thumg.mosaic_thumbnail(), mos_file)", "def main():\n # load parameters\n params = load_params()\n # check that both profiles exist (we assume that this means the directories\n # are found on disk)\n if not os.path.exists(params['profile1']):\n emsg = 'profile1={0} does not exist'\n eargs = [params['profile1']]\n raise AperoCopyError(emsg.format(*eargs))\n if not os.path.exists(params['profile2']):\n emsg = 'profile2={0} does not exist'\n eargs = [params['profile2']]\n raise AperoCopyError(emsg.format(*eargs))\n # get a list of files from profile 1 for each block kinds\n files1, paths1 = get_files_profile1(params)\n # get the output files for profile 2 for each block kind\n files2, files3, paths2, paths3 = get_files_profile2(params, files1, paths1)\n # copy files from profile 1 to profile 2 for each block kind\n # must copy files to a temporary path first (copying can be slow)\n copy_files(params, files1, files2, files3)\n # may need to update profile 2 (via git) to match profile 1\n update_git_profile2(params)\n # remove all old files from profile 2 blocks\n success = reset_profile2(params, paths3)\n # rename the directories in profile 2 (this is quicker than copying)\n if success:\n success = rename_directories(params, paths2, paths3)\n # update databases for profile 2\n if success:\n update_databases_profile2(params)\n # return to __main__\n return", "def copy_support_files() -> None:\n # root folder files\n filelist = {\"favicon128.png\",\n \"favicon96.png\",\n \"favicon72.png\",\n \"favicon48.png\",\n \"favicon32.png\",\n \"favicon24.png\",\n \"favicon16.png\",\n \"favicon.ico\",\n \"apple-touch-icon.png\",\n \"apple-touch-icon-precomposed.png\",\n \"apple-touch-icon-72x72.png\",\n \"apple-touch-icon-72x72-precomposed.png\",\n \"apple-touch-icon-114x114.png\",\n \"apple-touch-icon-114x114-precomposed.png\",\n \"apple-touch-icon-144x144.png\",\n \"apple-touch-icon-144x144-precomposed.png\",\n \"uca_style.css\"}\n for 
filename in filelist:\n try:\n shutil.copy2(\"resources/\" + filename, WEBOUT_PATH)\n except FileNotFoundError:\n report_error(\"Missing file: resources/\" + filename)\n # image folder files\n filelist = {\"film.png\",\n \"stylifera75.png\",\n \"DOI_logo.svg\",\n \"size_hist.png\",\n \"size_ind.png\",\n \"size_mean.png\",\n \"size_range.png\",\n \"size_summary.png\",\n \"double_clawed.jpg\"}\n for filename in filelist:\n try:\n shutil.copy2(\"resources/images/\" + filename, WEBOUT_PATH + \"images/\")\n except FileNotFoundError:\n report_error(\"Missing file: resources/images/\" + filename)\n filelist = {\"specific_word_cloud.png\",\n \"binomial_word_cloud.png\"}\n for filename in filelist:\n try:\n shutil.copy2(TMP_PATH + filename, WEBOUT_PATH + \"images/\")\n except FileNotFoundError:\n report_error(\"Missing file: \" + TMP_PATH + filename)\n # font-awesome files\n filelist = {\"fontawesome.min.js\",\n \"brands.min.js\",\n \"regular.min.js\",\n \"solid.min.js\"}\n for filename in filelist:\n try:\n shutil.copy2(\"resources/font-awesome/js/\" + filename, WEBOUT_PATH + \"js/\")\n except FileNotFoundError:\n report_error(\"Missing file: resources/font-awesome/js/\" + TMP_PATH + filename)\n # flag-icon files\n filelist = {\"flag-icons.min.css\"}\n for filename in filelist:\n try:\n shutil.copy2(\"resources/flag-icon-css/css/\" + filename, WEBOUT_PATH + \"images/flag-icon-css/css/\")\n except FileNotFoundError:\n report_error(\"Missing file: images/flag-icon-css/css/\" + TMP_PATH + filename)\n filelist = {\"de.svg\", # Germany\n \"es.svg\", # Spain\n \"ru.svg\", # Russia\n \"fr.svg\", # France\n \"pt.svg\", # Portugal\n \"dk.svg\", # Denmark\n \"nl.svg\", # Netherlands\n \"jp.svg\", # Japan\n \"cn.svg\", # China\n \"us.svg\", # USA\n \"th.svg\", # Thailand\n \"va.svg\", # Vatican\n \"it.svg\", # Italy\n \"kr.svg\", # South Korea\n \"pl.svg\", # Poland\n \"mm.svg\", # Myanamar (Burma)\n \"sa.svg\", # Saudi Arabia (best option for Arabic of those available)\n \"id.svg\", # Indonesia\n \"za.svg\", # South Africa (best option for Afrikaans)\n \"my.svg\", # Malaysia (for Malay)\n \"mg.svg\", # Madagascar (for Malagasy)\n \"ir.svg\", # Iran (for Persian)\n \"vn.svg\"} # Vietnam\n for filename in filelist:\n try:\n shutil.copy2(\"resources/flag-icon-css/flags/4x3/\" + filename, WEBOUT_PATH +\n \"images/flag-icon-css/flags/4x3/\")\n except FileNotFoundError:\n report_error(\"Missing file: images/flag-icon-css/flags/4x3/\" + TMP_PATH + filename)", "def post_process_hls(d):\r\n\r\n log('post_process_hls()> start processing', d.name)\r\n\r\n local_video_m3u8_file = os.path.join(d.temp_folder, 'local_video.m3u8')\r\n local_audio_m3u8_file = os.path.join(d.temp_folder, 'local_audio.m3u8')\r\n\r\n cmd = f'\"{config.ffmpeg_actual_path}\" -loglevel error -stats -y -protocol_whitelist \"file,http,https,tcp,tls,crypto\" ' \\\r\n f'-allowed_extensions ALL -i \"{local_video_m3u8_file}\" -c copy \"file:{d.temp_file}\"'\r\n error, output = run_command(cmd, d=d)\r\n\r\n if error:\r\n # retry without \"-c copy\" parameter, takes longer time\r\n cmd = f'\"{config.ffmpeg_actual_path}\" -loglevel error -stats -y -protocol_whitelist \"file,http,https,tcp,tls,crypto\" ' \\\r\n f'-allowed_extensions ALL -i \"{local_video_m3u8_file}\" \"file:{d.temp_file}\"'\r\n error, output = run_command(cmd, d=d)\r\n\r\n if error:\r\n log('post_process_hls()> ffmpeg failed:', output)\r\n return False\r\n\r\n if 'dash' in d.subtype_list:\r\n cmd = f'\"{config.ffmpeg_actual_path}\" -loglevel error -stats -y -protocol_whitelist 
\"file,http,https,tcp,tls,crypto\" ' \\\r\n f'-allowed_extensions ALL -i \"{local_audio_m3u8_file}\" -c copy \"file:{d.audio_file}\"'\r\n error, output = run_command(cmd, d=d)\r\n\r\n if error:\r\n # retry without \"-c copy\" parameter, takes longer time\r\n cmd = f'\"{config.ffmpeg_actual_path}\" -loglevel error -stats -y -protocol_whitelist \"file,http,https,tcp,tls,crypto\" ' \\\r\n f'-allowed_extensions ALL -i \"{local_audio_m3u8_file}\" \"file:{d.audio_file}\"'\r\n error, output = run_command(cmd, d=d)\r\n\r\n if error:\r\n log('post_process_hls()> ffmpeg failed:', output)\r\n return False\r\n\r\n log('post_process_hls()> done processing', d.name)\r\n\r\n return True", "def save_video(foldername, songname, songlen, num_steps, output):\n num_steps_by_len = num_steps / songlen\n p = subprocess.Popen(['ffmpeg', '-f', 'image2', '-r', str(num_steps_by_len), '-i', '%d.png', '-c:v', 'libx264', '-pix_fmt', 'yuv420p', '-vf', 'pad=ceil(iw/2)*2:ceil(ih/2)*2', 'movie.mp4'], cwd=foldername)\n p.wait()\n\n p = subprocess.Popen(['ffmpeg', '-i', 'movie.mp4', '-i', '../audio_files/' + songname + '.mp3', '-map', '0:v', '-map', '1:a', '-c', 'copy', output], cwd=foldername)\n p.wait()", "def create_movie(name, folder):\n cmd = [\"ffmpeg\", \"-framerate\", \"1\", \"-i\", folder + \"/pic%04d.png\", \"-c:v\",\n \"libx264\", \"-r\", \"30\", \"-pix_fmt\", \"yuv420p\", name]\n return subprocess.call(cmd)", "def _copy_snpeff_config(self):\n\n CONFIG = sequana_data(\"snpEff.config\", \"snpeff\")\n os.makedirs(self.snpeff_datadir, exist_ok=True)\n shutil.copyfile(CONFIG, self.configfile)", "def let_camera_update_parameters(path_to_images, name_image, video_source=\"/dev/video0\"):\n subprocess_cmd(\"ffmpeg -f video4linux2 -s 1280x720 -i {} -ss 00:00:02 -frames 1 ./{}/{} -loglevel error -nostats\".format(video_source, path_to_images, name_image))", "def _init(args, workflows_dir, config_path):\n for file in [\"samples.tsv\", \"config.yaml\"]:\n src = os.path.join(workflows_dir, args.workflow.replace(\"-\", \"_\"), file)\n dest = os.path.join(os.path.dirname(config_path), file)\n\n copy_file = True\n if os.path.exists(dest) and args.force is False:\n choices = {\"yes\": True, \"y\": True, \"no\": False, \"n\": False}\n\n sys.stdout.write(f\"File: {dest} already exists. Do you want to overwrite it? (yes/no) \")\n while True:\n choice = input().lower()\n if choice in choices:\n copy_file = choices[choice]\n break\n else:\n print(\"Please respond with yes (y) or no (n).\")\n\n if copy_file:\n shutil.copyfile(src, dest)", "def get_video_as_images():\n experiments = ['me1.mp4']\n try:\n if (os.path.isdir(\"dump\")):\n shutil.rmtree('dump')\n except OSError:\n print (\"Deletion of the directory failed\")\n exit()\n os.system('mkdir dump')\n for experiment in experiments:\n exp_no_ext = experiment.split('.')[0]\n subdir_cmd = \"dump/{0}\".format(exp_no_ext)\n os.mkdir(subdir_cmd)\n os.system('ffmpeg -i videos/%s dump/%s/%s%%03d.jpg' % (experiment, exp_no_ext, exp_no_ext))\n run_all(exp_no_ext)", "def prepare_destination(self):\n self.movie_root_path = self.config.share_movie_root_path % (\n self.share_path, self.title)\n\n if os.path.isdir(self.movie_root_path):\n if self.capacity_reached():\n Logger.log(\n '[!] Capacity reached. Skipping adding movie %s.' 
% self.title)\n else:\n if not os.path.isdir(self.movie_root_path):\n Logger.log('[+] Adding Movie: %s' % self.title)\n os.mkdir(self.movie_root_path)", "def main():\n parser = CustomArgumentParser()\n parser.add_argument(\"-s\", \"--simon-sez\",\n help=\"Really, Simon sez copy the data!\", action=\"store_true\")\n parser.add_argument(\"-r\", \"--src-directory\",\n help=\"Copy metadata from files in this directory.\")\n parser.add_argument(\"-d\", \"--dst-directory\",\n help=\"Copy metadata to matching files in this directory.\")\n parser.add_argument(\"-v\", \"--verbose\", help=\"Log level to DEBUG.\",\n action=\"store_true\")\n args = parser.parse_args()\n\n if args.verbose:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n error = False\n\n # Require these two arguments.\n for arg in [args.src_directory, args.dst_directory]:\n if not arg:\n logger.error(\n \"Required src or dst directory parameter missing.\")\n error = True\n # XXX: Duplicates exit below. Can't check directory if null.\n logger.error(\"Exiting due to errors.\")\n parser.usage_message()\n sys.exit(1)\n\n if (os.path.exists(args.src_directory) and\n os.path.isdir(args.src_directory)):\n src_directory = args.src_directory\n else:\n logger.error(\n \"--src-directory={} does not exist or is not a directory.\".format(\n args.dst_directory))\n error = True\n\n if (os.path.exists(args.dst_directory) and\n os.path.isdir(args.dst_directory)):\n dst_directory = args.dst_directory\n else:\n logger.error(\n \"--dst-directory={} does not exist or is not a directory.\".format(\n args.dst_directory))\n error = True\n\n if error:\n logger.error(\"Exiting due to errors.\")\n parser.usage_message()\n sys.exit(1)\n else:\n process_all_files(src_directory, dst_directory, simon_sez=args.simon_sez)", "def make_image_dir(to_path, filenames):\n image_dir = os.path.join(to_path, \"image_2\")\n os.makedirs(image_dir)\n for f in filenames:\n image_file = os.path.join(image_dir, f + \".png\")\n os.system(\"cp sample.png {}\".format(image_file))", "def write_upload_files(self, appstruct):\n \n # Create the directory if it does not exist\n final_dir = \"thumbnails/%s\" % slugify(appstruct[\"serial\"])\n if not os.path.exists(final_dir):\n log.info(\"Make directory: %s\", final_dir)\n os.makedirs(final_dir)\n\n final_file = \"%s/uploaded.pdf\" % final_dir\n file_pointer = appstruct[\"pdf_upload\"][\"fp\"]\n self.single_file_write(file_pointer, final_file)", "def TextureFiles():\n import shutil\n\n # first convert the .psd files to .png\n\n FbmDir = glo.outputFolder + '.fbm'\n\n for d1, d2, filenames in os.walk(FbmDir):\n for filename in filenames:\n \"\"\"filename: vitrin_diffuse.psd\n \"\"\"\n # print \"TextureFiles():\", filename\n if filename[-4:].upper() == '.PSD':\n #print \" -- FbmDir:\" , FbmDir\n #print \" -- in the if clause with filename:\" , filename\n #print \" -- glo.outputFolder\" , glo.outputFolder\n # FbmDir = '../fbx/simplelifeembedmedia.fbm'\n # filename = 'shelves_light.PSD'\n PsdToPngConverter(FbmDir, filename)\n\n # Move only the .png file to the ../png/ directory\n filename = filename[:-4] + '.png'\n src = os.path.join(FbmDir, filename)\n elif filename[0] != '.':\n src = os.path.join(FbmDir, filename)\n pass\n\n shutil.copy(src, glo.outputFolder)\n print os.path.join(glo.outputFolder, filename), \"\\n\"\n sys.stdout.flush()\n # for d1, d2, files in os.walk(glo.outputFolder):\n # if not filename in files:\n # #print \"moving: \", files, filename, not filename in files\n # 
shutil.copy(src, glo.outputFolder)\n # print os.path.join(glo.outputFolder, filename), \"\\n\"\n # else:\n # print \"%s/%s already exists. File not moved\" % (glo.outputFolder,filename)", "def handle_image(name):\n from_path = args.from_dir + name\n to_path = args.to_dir + name\n\n if width != args.width:\n subprocess.call('jpegtran -rotate 90 -grayscale ' + from_path + ' > ' \\\n + to_path, shell=True)\n else:\n subprocess.call('jpegtran -grayscale ' + from_path + ' > ' + to_path,\\\n shell=True)", "def photo2web_process_hattenbach():\n\n os.chdir('/Volumes/SSD External/Hattenbach_v2')\n \n dir_base = os.getcwd()\n \n dir_p2w = '/Users/throop/photos/Trips/'\n \n dirs = sorted(glob.glob(os.path.join(dir_base, '*')))\n \n quality_out = '60'\n size_out = '2000x2000'\n \n for i,dir in enumerate(dirs):\n if os.path.isdir(dir):\n os.chdir(dir)\n dir_originals = os.path.join(dir, 'originals')\n dir_originals_fullres = os.path.join(dir, 'originals_fullres')\n\n# For HH files, copy the 'actual' originals into a 'fullres' folder, for safekeeping\n\n if not os.path.isdir(dir_originals_fullres):\n os.rename(dir_originals, dir_originals_fullres)\n os.mkdir(dir_originals)\n \n files = glob.glob(os.path.join(dir_originals_fullres, '*'))\n\n# Get a list of all the images\n\n# For each image, make a low-res, low-quality image. This is just because the scanned files\n# are huge and high-quality, and not useful for online. They are much larger than necessary. \n# So we use 'convert' to shrink them in size and quality, and put the output into 'originals' directory \n# for photo2web.\n\n for file in files:\n file_short = os.path.basename(file)\n file_in = os.path.join(dir_originals_fullres,file_short)\n file_out = os.path.join(dir_originals,file_short)\n if not os.path.isfile(file_out):\n cmd = (f'convert -resize {size_out} -quality {quality_out}' +\n f' {file_in}' +\n f' {file_out}')\n print(f'{cmd}')\n \n subprocess.run(['convert', '-resize', size_out, '-quality', quality_out,\n file_in,\n file_out])\n\n# Now, finally, go thru and do photo2web on all of them.\n \n print(f'\\nProcessing directory {i}/{len(dirs)} {dir}\\n')\n subprocess.run(['cp', '-r', os.path.join(dir_p2w, 'header.txt'), '.'])\n subprocess.run(['cp', '-r', os.path.join(dir_p2w, 'photos.css'), '.'])\n if not os.path.exists('captions.txt'):\n subprocess.run(['captions_photo2web']) \n subprocess.run(['photo2web_old'])\n subprocess.run(['photo2web'])", "def config_copy(ipydir, profile):\n for fpath in profile_files(profile):\n filename = osp.basename(fpath)\n dest_file = osp.join(ipydir, 'profile_' + profile, 'startup',\n filename)\n shutil.copy(fpath, dest_file)\n logger.info(\"Copy files '%s' for profile '%s'.\",\n osp.basename(filename), profile)", "def main():\n # Parameters\n opt = get_args()\n\n assert os.path.exists(opt.path_video), \"Video file does not exist\"\n try:\n os.makedirs(opt.path_images)\n except Exception:\n print(\"Folder already exists. 
Overwriting it\")\n pass\n\n assert opt.size is None or opt.size is not None and len(opt.size) <= 2, \"Make sure the size indicated contains at maximum two numbers [none, max_dimension or width and height]\"\n\n # Get base path\n base_path = os.path.join(opt.path_images, opt.basename)\n\n # Load video from file\n try:\n cap = cv2.VideoCapture(opt.path_video)\n except Exception as e:\n print('Video failed to be loaded:', e)\n sys.exit(0)\n\n # Parse video\n parse_video(cap, base_path, opt.step, opt.size)\n\n # Release capture\n cap.release()\n cv2.destroyAllWindows()\n \n return 0", "def main():\n\n # Just grab all files - we'll use try/except to filter\n images = glob.glob(os.path.join(args.input_dir, '*.*'))\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n for img_file in images:\n print(img_file)\n try:\n np_img = plt.imread(img_file)\n print(np_img.shape)\n img_name = img_file.split(os.sep)[-1]\n new_img_file = os.path.join(args.output_dir, img_name)\n pad_image(np_img, new_img_file)\n except Exception as e:\n print('Warning: {}. Skpping file.'.format(e))\n continue", "def generate_thumbnail(self, img_path):\n\n thumb_path = self.thumbnail_path(img_path)\n dirpath = os.path.dirname(thumb_path)\n try:\n os.makedirs(dirpath)\n except OSError: # path exists\n pass\n\n cmd = [\n '/usr/local/bin/gm',\n 'convert',\n '-thumbnail', '256x256>',\n '-background', 'transparent',\n '-gravity', 'center',\n '-extent', '256x256',\n img_path, thumb_path\n ]\n\n retcode = subprocess.call(cmd)\n\n if retcode:\n log.error('convert exited with %d : %s', retcode, img_path)\n return False\n\n log.debug('Wrote thumbnail for `%s` to `%s`.', img_path, thumb_path)\n\n return True", "def take_door_photo():\n\n # based on lukec's code in VHS.pm\n config = yaml.load(file('/etc/vhs.yaml'))\n short_hash = hashlib.sha256(str(datetime.datetime.now())).hexdigest()[0:6]\n pic_base = config.get('picture_base')\n if pic_base:\n filename = os.path.join(pic_base, '%s.jpeg' % short_hash)\n os.system('streamer -c /dev/video0 -b 16 -o %s >/dev/null 2>&1' % filename)\n short_file = os.path.splitext(filename)[0] + '.jpg'\n os.rename(filename, short_file)\n pic_uri_base = config.get('picture_uri_base') \n if pic_uri_base and os.path.exists(short_file):\n pic_uri = '%s/%s' % (pic_uri_base, os.path.basename(short_file))\n return (pic_uri, short_file)\n\n return None", "def main(input):\n path = os.path.abspath(input)\n name = os.path.splitext(os.path.basename(path))[0]\n p = os.path.join(os.getcwd(),name)\n i = 1\n p1 = p\n while os.path.exists(p1):\n p1 = \"{p}-{i}\".format(p=p,i=i)\n i += 1\n p = p1\n os.mkdir(p1)\n os.mkdir(os.path.join(p1,\"media\"))\n with zipfile.ZipFile(path) as zf:\n for file in zf.namelist():\n # Path traversal defense copied from\n # http://hg.python.org/cpython/file/tip/Lib/http/server.py#l789\n words = file.split('/')\n dest = os.path.join(p1, \"media\")\n if words[0] == \"word\" and words[1] == \"media\":\n for word in words[2:]:\n while True:\n drive, word = os.path.splitdrive(word)\n head, word = os.path.split(word)\n if not drive:\n break\n if word in (os.curdir, os.pardir, ''):\n continue\n dest = os.path.join(dest, word)\n click.echo(\"{} -> {}\".format(file, dest))\n of = open(dest, 'wb')\n of.write(zf.read(file))\n of.close()\n\n newdoc = os.path.join(p1, os.path.basename(path))\n lyxfile = os.path.join(p1, name + \".lyx\")\n texfile = os.path.join(p1, name + \".tex\")\n shutil.copyfile(path, newdoc)\n os.system(\"pandoc -s -f docx -t latex -o '{of}' 
'{i}'\".format(of=texfile, i=newdoc))\n os.system(\"tex2lyx '{i}' '{o}'\".format(i=texfile, o=lyxfile))\n os.remove(texfile)\n os.system(\"convertwmf {dir}\".format(dir=os.path.join(p1, \"media\")))\n click.echo(lyxfile)", "def make_video(data,\n xdim, ydim, sample_read_rows, sample_read_cols, image_write_rows, image_write_cols,\n directory, filename, fps = 24.0, start_frame = 1, end_frame = None, timestamp = False, fontsize = 30, ts_pos = (0,0), save_raw = False):\n\n #Command to send via the command prompt which specifies the pipe parameters\n # command = ['ffmpeg',\n # '-y', # (optional) overwrite output file if it exists\n # '-f', 'image2pipe',\n # '-vcodec', 'mjpeg', #'mjpeg',\n # '-r', '1',\n # '-r', str(fps), # frames per second\n # '-i', '-', # The input comes from a pipe\n # '-an', # Tells FFMPEG not to expect any audio\n # '-vcodec', 'mpeg4',\n # '-b:v', '5000k',\n # directory + filename + \"/\"+filename+\".mp4\",\n # '-hide_banner',\n # '-loglevel', 'panic']\n\n # Create directories if they don't exist\n if not os.path.exists(os.path.join(directory, filename, 'frames/')):\n os.makedirs(os.path.join(directory, filename, 'frames/'))\n if save_raw and not os.path.exists(os.path.join(directory, filename, 'frames-raw/')):\n os.makedirs(os.path.join(directory, filename, 'frames-raw/'))\n\n if end_frame == None:\n end_frame = data.FrameCount\n\n cm = colormap.get_cmap('viridis')\n\n for i, frame_offset in enumerate(tqdm.tqdm(range(start_frame, end_frame))):\n frame = FrameRead(data, frame_offset)\n frame_image = np.zeros([ydim, xdim], dtype=np.uint8)\n frame_image[image_write_rows, image_write_cols] = frame.frame_data[sample_read_rows, sample_read_cols]\n\n rgb_im = Image.fromarray(cm(frame_image, bytes=True)).convert('RGB')\n rgb_im.save(os.path.join(directory, filename, 'frames/', f'{i}.jpg'), 'JPEG')\n\n if save_raw:\n Image.fromarray(np.uint8(frame.frame_data), mode='L').save(os.path.join(directory, filename, 'frames-raw/', f'{i}.jpg'), 'JPEG')", "def genThumbnail(filename,thumbnailType,config,regen=False):\n # define the sizes of the various thumbnails\n thumbnailTypeDefinitions={\n 's': (75,75), #should be square eventually\n 'q': (150,150), #should be square eventually\n 't': (100,100),\n 'm': (240,240),\n 'n': (320,230),\n 'k': (500,500),\n 'c': (800,800),\n 'b': (1024,1024)}\n size = thumbnailTypeDefinitions[thumbnailType]\n thumbFilename = filename.split('.')[0] + '_' + thumbnailType + '.' 
+ filename.split('.')[1]\n if os.path.isfile(config['LOCALARCHIVEPATH']+'/'+thumbFilename) and regen == False:\n return(thumbFilename)\n else:\n try:\n logger.info('Generating thumbnail: %s' %(config['LOCALARCHIVEPATH']+'/'+thumbFilename))\n img = Image.open(config['LOCALARCHIVEPATH']+'/'+filename)\n icc_profile = img.info.get('icc_profile')\n img.thumbnail(size,Image.ANTIALIAS)\n img.save(config['LOCALARCHIVEPATH']+'/'+thumbFilename, 'JPEG', icc_profile=icc_profile, quality=95)\n return(thumbFilename)\n except IOError as e:\n raise e", "def add_media(file, mode, gui_instance=None):\r\n\r\n global option # Using the global variable that specifies user choice (typically \"Yes\" or \"No\" choices)\r\n global config_var # Using the global variable that reads and modifies the configuration file\r\n\r\n basename_file = os.path.basename(file)\r\n\r\n # Checking if the specified file is a valid media file\r\n if basename_file.endswith('.mp3') or basename_file.endswith('.wav'):\r\n assumed_artist = \"\" # This variable will store the artist of the media\r\n\r\n if \"-\" in basename_file:\r\n\r\n \"\"\" \r\n Usually, media files use a '-' character to split the title of the media and the artist.\r\n This algorithm will attempt to automatically 'guess' the title and artist of the media if this\r\n character is present.\r\n \"\"\"\r\n\r\n # If there is a whitespace before the '-' character, we remove it\r\n if basename_file.split(\"-\")[0].endswith(\" \"):\r\n assumed_artist = basename_file.split(\"-\")[0][:-1] # The auto-processed artist name\r\n else:\r\n assumed_artist = basename_file.split(\"-\")[0]\r\n\r\n if file.split(\"-\")[1].startswith(\" \"): # If there is a whitespace after the '-' character, we remove it\r\n assumed_title = basename_file.split(\"-\")[1][1:-4] # The auto-processed media title\r\n else:\r\n assumed_title = basename_file.split(\"-\")[1][:-4]\r\n\r\n else: # If no \"-\" character is present in the title of the file, assuming the title is the name of the file\r\n assumed_title = os.path.splitext(basename_file)[0]\r\n\r\n if config_var['RUN-MODE']['run_mode'] == \"2\": # Debugging mode\r\n print(\"\\nAssumed title: \" + assumed_title)\r\n print(\"Assumed artist: \" + assumed_artist)\r\n\r\n if not mode: # The user is attempting to add media files from another directory\r\n try:\r\n shutil.copy2(file, media_folder) # Copying the source file to the media folder\r\n\r\n except PermissionError: # Application does not have permission to write in the media folder\r\n # Application is running in GUI-mode\r\n if gui_instance is not None:\r\n messagebox.showerror(\"Unable to copy file\", \"Unable to copy file to media folder. Make sure you \"\r\n \"haven't selected a write-protected folder.\")\r\n\r\n # Application is running in CLI or debugging mode\r\n if config_var['RUN-MODE']['run_mode'] == \"1\" or config_var['RUN-MODE']['run_mode'] == \"2\":\r\n print(\"\\nError: Unable to copy file to media folder. 
Make sure you haven't selected a \"\r\n \"write-protected folder.\")\r\n\r\n return False\r\n\r\n # Updating the database\r\n cursor = connection.cursor()\r\n\r\n # Getting the full path of the file (using an app-level convention for slashes)\r\n full_path = os.path.join(media_folder, os.path.basename(file)).replace(\"\\\\\", \"/\")\r\n\r\n cursor.execute(\"SELECT COUNT(1) FROM media WHERE full_path = \\\"\" + full_path + \"\\\"\")\r\n result = int(str(cursor.fetchone())[1])\r\n\r\n if not result: # The selected file is not present in the database\r\n sql_command = ''' INSERT INTO media(title, artist, album, release_date, tags, full_path)\r\n VALUES (?, ?, ?, ?, ?, ?) '''\r\n\r\n values = (assumed_title, assumed_artist, '', '', '', full_path)\r\n\r\n try: # Attempting to add the media file to the database\r\n cursor.execute(sql_command, values)\r\n\r\n connection.commit()\r\n\r\n except Error: # Database is locked\r\n # Application is running in GUI-mode\r\n if gui_instance is not None:\r\n messagebox.showerror(\"Database is locked\", \"Error when trying to commit changes to database. Make \"\r\n \"sure another application is not using the database.\")\r\n\r\n # Application is running in CLI or debugging mode\r\n if config_var['RUN-MODE']['run_mode'] == \"1\" or config_var['RUN-MODE']['run_mode'] == \"2\":\r\n print(\"\\nError when trying to commit changes to database. Make sure another application is not \"\r\n \"using the database.\")\r\n\r\n return False\r\n\r\n if gui_instance is not None: # The method has been fired by a GUI widget\r\n gui_instance.display_media() # Updating the media list\r\n\r\n else: # The method has been fired by using CLI\r\n cursor.execute(\"SELECT id FROM media WHERE full_path = \\\"\" + full_path + \"\\\"\")\r\n new_id = cursor.fetchone()\r\n\r\n print(\"\\nThe song was added successfully!\\n\\nThe ID of the song is: \" + str(new_id[0]) +\r\n \"\\nDo you want to configure the song metadata now? 
(Y/N)\")\r\n\r\n option = input() # Getting user response\r\n if option.lower() == \"y\": # The user has responded affirmatively\r\n SongStorageCLI.configure_media(full_path)\r\n\r\n else: # The user has responded negatively\r\n print(\"\\nThe auto-processing tool assumed that the name of the song is \\\"\" + assumed_title + \"\\\" \" +\r\n \"and that the name of the artist is \\\"\" + assumed_artist + \"\\\".\\nYou can always change \" +\r\n \"these values, as well as other metadata information, by using the \\\"Modify_data \" +\r\n str(new_id[0]) + \"\\\" command.\")\r\n return\r\n\r\n else: # The selected file already exists in the database; letting the user know\r\n if gui_instance is not None: # The method has been fired by a GUI widget\r\n messagebox.showinfo(\"Media file already exists\",\r\n \"The selected file already exists in the media folder.\")\r\n\r\n else: # The method has been fired by using CLI\r\n print(\"There is already a song with this name in the media folder!\")\r\n\r\n return False\r\n\r\n if config_var['RUN-MODE']['run_mode'] == \"2\": # Debugging mode\r\n print(\"\\nMedia file has been added successfully.\")\r\n\r\n return True", "def copy_database(path_images, path_labels, path_final_images):\n\n try:\n labels = sorted(os.listdir(path_labels))\n except FileNotFoudError:\n print(\"No such file or directory \", path_labels)\n\n try:\n images = sorted(os.listdir(path_images)) #+ \"RetinaNet_I04590/\"))\n except FileNotFoudError:\n print(\"No such file or directory \", path_images)\n\n \"\"\"if not os.path.exists(path_final_images + \"I04590/\"):\n os.mkdir(path_final_images + \"I04590/\")\n\n if not os.path.exists(path_final_images + \"I045135/\"):\n os.mkdir(path_final_images + \"I045135/\")\n\n if not os.path.exists(path_final_images + \"I090135/\"):\n os.mkdir(path_final_images + \"I090135/\")\n\n if not os.path.exists(path_final_images + \"I4590135/\"):\n os.mkdir(path_final_images + \"I4590135/\")\n\n if not os.path.exists(path_final_images + \"Params/\"):\n os.mkdir(path_final_images + \"Params/\")\n\n if not os.path.exists(path_final_images + \"Pauli2/\"):\n os.mkdir(path_final_images + \"Pauli2/\")\n\n if not os.path.exists(path_final_images + \"Pauli3/\"):\n os.mkdir(path_final_images + \"Pauli3/\")\n\n if not os.path.exists(path_final_images + \"Stokes/\"):\n os.mkdir(path_final_images + \"Stokes/\")\n\n if not os.path.exists(path_final_images + \"Rachel/\"):\n os.mkdir(path_final_images + \"Rachel/\")\n\n if not os.path.exists(path_final_images + \"Rachel2/\"):\n os.mkdir(path_final_images + \"Rachel2/\")\"\"\"\n\n for k in range(len(images)):\n if str(k) + \".xml\" in labels:\n copyfile(path_images + \"/\" + images[k],\n path_final_images + \"/\" + images[k])\n \"\"\"copyfile(path_images + \"RetinaNet_I04590/\" + str(k) + \".png\",\n path_final_images + \"I04590/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_I045135/\" + str(k) + \".png\",\n path_final_images + \"I045135/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_I090135/\" + str(k) + \".png\",\n path_final_images + \"I090135/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_I4590135/\" + str(k) + \".png\",\n path_final_images + \"I4590135/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Params/\" + str(k) + \".png\",\n path_final_images + \"Params/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Pauli2/\" + str(k) + \".png\",\n path_final_images + \"Pauli2/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Pauli3/\" + str(k) + 
\".png\",\n path_final_images + \"Pauli3/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Stokes/\" + str(k) + \".png\",\n path_final_images + \"Stokes/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Rachel/\" + str(k) + \".png\",\n path_final_images + \"Rachel/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Rachel2/\" + str(k) + \".png\",\n path_final_images + \"Rachel2/\" + str(k) + \".png\")\n copyfile(path_labels + str(k) + \".xml\",\n path_final_labels + str(k) + \".xml\")\"\"\"\n print(k)", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--visualize', dest='visualize_dir', help=\"Path to directory to load all vizualization info from\")\n parser.add_argument('--overwrite', dest='overwrite', default=False, action='store_true', help=\"Overwrite existing logs parts if found\")\n args = parser.parse_args()\n if not args.visualize_dir:\n print \"Missing required argument, --visualize\"\n exit(-1)\n\n dsrc_log_file = args.visualize_dir + '/dsrc.log'\n radar_log_file = args.visualize_dir + '/radar.log'\n video_file = args.visualize_dir + '/video.mp4'\n log_config = args.visualize_dir + '/config.json'\n\n config = parse_config(log_config)\n\n if 'parts_auto_enabled' in config and config['parts_auto_enabled']:\n cap = cv2.VideoCapture(video_file)\n fps = cap.get(cv2.CAP_PROP_FPS)\n frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n duration = float(frames) / fps\n cap.release()\n\n print 'Video duration: %s' % duration\n start = 0\n count = 1\n while start < duration:\n config['parts'].append({\n 'start': start,\n 'end': start + config['parts_auto_interval'],\n 'name': 'auto_part_%s' % count\n })\n count = count + 1\n start = start + config['parts_auto_interval']\n\n print config \n\n for index, part in enumerate(config['parts']):\n part_path = args.visualize_dir + '/' + (part['name'] if 'name' in part else 'part_%s' % (index+1))\n print \"---------------------------------------\"\n print \" Writing log to %s\" % part_path\n print \"---------------------------------------\"\n if not args.overwrite and os.path.exists(part_path):\n print \"Log already exists, skipping...\"\n continue\n\n if not os.path.exists(part_path):\n os.makedirs(part_path)\n\n export_part_video(part, part_path, video_file)\n export_part_log(part, part_path + '/radar.log', radar_log_file, config['video_start'])\n export_part_log(part, part_path + '/dsrc.log', dsrc_log_file, config['video_start'])\n export_part_config(part_path + '/config.json', config)", "def deploy_user_media(env=None, haus_vars={} ):\n print green('Deploying user media')\n with cd(\"/var/www\"):\n run('./manage.py sync_media_s3 --prefix=uploads')", "def get_thumbnail_path(examfile):\n h = examfile.hash\n thumb = os.path.join(settings.MEDIA_ROOT, \"cache\", h + \".png\")\n if os.path.exists(thumb):\n return thumb\n path = os.path.join(settings.MEDIA_ROOT, examfile.path.path)\n cmd = \"/usr/local/bin/mudraw -o %s -h 800 -w 600 '%s' 1\" % (thumb, path)\n args = shlex.split(cmd.encode(\"utf8\"))\n mudraw = subprocess.Popen(args)\n mudraw.wait()\n return thumb", "def web_archive_config():\n\n try:\n auth_check()\n except Exception as e:\n return flask.redirect(str(e))\n\n socket_timeout = flask.request.args.get('ytdl-socket-timeout', '120')\n retries = flask.request.args.get('ytdl-retries', 'infinite')\n output = flask.request.args.get('ytdl-output', '%(uploader_id)s/%(id)s.%(ext)s')\n overwrites = flask.request.args.get('ytdl-overwrites', 'false') == 'true'\n info_json = 
flask.request.args.get('ytdl-info-json', 'true') == 'true'\n thumbnail = flask.request.args.get('ytdl-thumbnail', 'true') == 'true'\n format = flask.request.args.get('ytdl-format', 'bestvideo[vcodec^=vp]' +\n '+bestaudio[acodec=opus]/bestvideo+bestaudio[acodec=opus]' +\n '/bestvideo+bestaudio/best')\n merge_format = flask.request.args.get('ytdl-merge-format', 'mkv')\n all_subs = flask.request.args.get('ytdl-all-subs', 'true') == 'true'\n sub_format = flask.request.args.get('ytdl-sub-format', 'srt/best')\n convert_subs = flask.request.args.get('ytdl-convert-subs', 'srt')\n\n config = io.BytesIO()\n\n config.write(('--socket-timeout ' + socket_timeout + '\\n').encode('utf-8'))\n config.write(('--retries ' + retries + '\\n').encode('utf-8'))\n config.write(('--output ' + output + '\\n').encode('utf-8'))\n if not overwrites:\n config.write('--no-overwrites\\n'.encode('utf-8'))\n if info_json:\n config.write('--write-info-json\\n'.encode('utf-8'))\n if thumbnail:\n config.write('--write-thumbnail\\n'.encode('utf-8'))\n config.write(('--format ' + format + '\\n').encode('utf-8'))\n config.write(('--merge-output-format ' + merge_format + '\\n').encode('utf-8'))\n if all_subs:\n config.write('--all-subs\\n'.encode('utf-8'))\n config.write(('--sub-format ' + sub_format + '\\n').encode('utf-8'))\n config.write(('--convert-subs ' + convert_subs + '\\n').encode('utf-8'))\n\n config.seek(0)\n\n return flask.Response(config,\n mimetype = 'text/plain',\n headers = { 'Content-Disposition': 'attachment;filename=config.txt' }\n )", "def process_video(self, input_path, output_path, debug=False):\n clip = VideoFileClip(input_path)\n if debug:\n test_clip = clip.fl_image(self.process_image_debug)\n else:\n test_clip = clip.fl_image(self.process_image)\n test_clip.write_videofile(output_path)", "def copy_image_files(self) -> None:\n if self.images:\n if self.config.epub_fix_images or self.config.epub_max_image_width:\n if not Image:\n logger.warning(__('Pillow not found - copying image files'))\n super().copy_image_files()\n else:\n self.copy_image_files_pil()\n else:\n super().copy_image_files()", "def preparation(self):\n # [1] Makes a dir for saving results.\n # if 'Result' dir already exists,\n # a 'temporary' dir will be made.\n\n try:\n os.mkdir(self.dir_for_saving_result)\n except FileExistsError:\n self.viewer.display_message(\"Made a temporary directory.\")\n self.dir_for_saving_result = 'temporary'\n os.mkdir('temporary')\n\n # [2] Copies config file into the same dir as the one where results will be stored\n shutil.copy2(self.config_file_name, self.dir_for_saving_result)", "def make_mock_fs(self):\n\t\ttemp = tempfile.mkdtemp(prefix=\"fpltest\")\n\t\ttry:\n\t\t\tconfig = fplsync.Config()\n\t\t\tconfig.playlist_source = os.path.join(temp, \"fb2k_playlists\")\n\t\t\tconfig.source = os.path.join(temp, \"source\")\n\t\t\tconfig.dest = os.path.join(temp, \"dest\")\n\t\t\tconfig.playlist_dest = os.path.join(temp, \"playlists\")\n\t\t\tos.mkdir(config.source)\n\t\t\twith open(os.path.join(config.source, \"a.mp3\"), \"w\") as f:\n\t\t\t\tprint(\"a\" * 1000, file=f, end=\"\")\n\t\t\twith open(os.path.join(config.source, \"b.mp3\"), \"w\") as f:\n\t\t\t\tprint(\"b\" * 1000, file=f, end=\"\")\n\t\t\twith open(os.path.join(config.source, \"c.mp3\"), \"w\") as f:\n\t\t\t\tprint(\"c\" * 1000, file=f, end=\"\")\n\t\t\tos.mkdir(config.dest)\n\t\t\tos.mkdir(config.playlist_source)\n\t\t\tos.mkdir(config.playlist_dest)\n\t\t\tyield config\n\t\tfinally:\n\t\t\tshutil.rmtree(temp)", "def prepare_output_dir(out_dir, 
test_dir):\r\n\r\n if not out_dir.exists():\r\n out_dir.mkdir()\r\n\r\n # get the necessary file names\r\n file_names = get_file_names(test_dir, args.distance, print_file_names=False)\r\n\r\n # copy the images in the firstIms into the output folder\r\n for name in file_names[1][0]:\r\n file_path = Path(test_dir / name)\r\n copy_to = Path(out_dir / name)\r\n shutil.copy(file_path, copy_to)\r\n\r\n # the firstIms list does not contain the last image,\r\n # so we need to also copy the last image of the secIms into the output folder\r\n last_im = file_names[1][1][-1]\r\n shutil.copy(Path(test_dir/last_im), Path(out_dir/last_im))\r\n\r\n return file_names", "def setup_part1(args):\n args.rootarg = args.root\n rootext = os.path.splitext(args.rootarg)[1]\n if rootext == '':\n pass\n else:\n args.root = os.path.dirname(args.root)\n\n if args.root:\n args.root = os.path.abspath(args.root)\n if not os.path.isdir(args.root):\n if args.gallery:\n os.mkdir(args.root)\n else:\n error('Directory not found', args.root)", "def create_thumb(dirpath, imgname):\n with open(join(dirpath, imgname), 'r+b') as f:\n with Image.open(f) as image:\n thumb = resizeimage.resize_width(image, 50)\n thumbpath = join(outdir, thumbdir, imgname)\n print(\"saving thumbnail to\", thumbpath)\n thumb.save(thumbpath, image.format)", "def create_full(dirpath, imgname):\n with open(join(dirpath, imgname), 'r+b') as f:\n with Image.open(f) as image:\n fullpicpath = join(outdir, fullpicdir, imgname)\n print(\"saving full pic to\", fullpicpath)\n image.save(fullpicpath, quality=75, optimize=True)", "def convert_topic_dir(full_topic_dir, full_target_dir, photo_topic):\n warnings.simplefilter('error', Image.DecompressionBombWarning)\n topicfiles = [f for f in listdir(full_topic_dir) if isfile(join(full_topic_dir, f))\n and (f.lower().endswith(\".jpg\") or f.lower().endswith(\".jpeg\"))]\n for topicfile in topicfiles:\n fulltopicfile = join(full_topic_dir, topicfile)\n fulltargetfile = join(full_target_dir, topicfile)\n if not exists_and_newer(fulltargetfile, fulltopicfile):\n print(\" Converting\", topicfile, \": \", end='')\n try:\n im = Image.open(fulltopicfile)\n\n if im._getexif() is not None:\n exif = {\n TAGS[k]: v\n for k, v in im._getexif().items()\n if k in TAGS\n }\n else:\n exif = dict()\n\n if 'Orientation' in exif:\n im = apply_image_rotation_by_exif(im, exif['Orientation'])\n\n if 'ImageDescription' in exif:\n photo_description = exif['ImageDescription']\n photo_description = bytes(photo_description, encoding=\"ansi\", errors=\"ignore\").decode(\"utf-8\", errors=\"ignore\") # PIL reads exif data as ansi not utf-8 strings\n if photo_description.rstrip() == '':\n photo_caption = photo_topic\n else:\n if photo_description.endswith('#'):\n photo_caption = photo_topic + \" - \" + photo_description.rstrip('#')\n else:\n photo_caption = photo_description\n else:\n photo_caption = photo_topic\n\n print(photo_caption)\n im.thumbnail(targetSize, Image.ANTIALIAS)\n im = add_caption_to_image(im, photo_caption)\n im.save(fulltargetfile, \"JPEG\")\n except IOError:\n print(\"cannot create target for '%s'\" % fulltopicfile)\n except AttributeError:\n print(\"Attribute error for '%s'\" % fulltopicfile)\n else:\n print(\" Skipping\", topicfile)", "def prepare(self, dst, options):\n self.checkExisting(dst)\n self.makedirs(dst.parent())", "def process_video(filename, args, cfg, net):\n # Split video into frames\n images = split_video(filename)\n # Set output dir\n output_dir = args.output\n # Add brackets and extension to filename\n 
output_path = create_video_output_path(output_dir, cfg)\n # Get height and width of 1st image\n height, width, _ = check_img_size(images[0]).shape\n # Create VideoWriter object\n video = cv2.VideoWriter(output_path, \n cv2.VideoWriter_fourcc(*'FMP4'), \n cfg['video']['fps'], \n (width, height))\n for image in images:\n # Process frames\n img_steps = process_image(image, cfg, net)\n # Check for --show-detections flag\n output_img = check_if_adding_bboxes(args, img_steps) \n # Write to video\n video.write(output_img) \n # Release video writer object\n video.release()", "def CpSrcDest( IMGFOLDER = '..\\\\Data\\\\',\n EXTENSION = \".jpg\",\n DESTINATION_FOLDER='..\\\\Data\\\\AllBirds\\\\'):\n for i, imag in enumerate(BirdPhotos):\n filename= IMGFOLDER+ imag + EXTENSION\n cpCommand = \"copy \" + filename + \" \" + DESTINATION_FOLDER\n os.system(cpCommand)\n return", "def get_destination(metadata: Metadata):\n\n func = f\"{__name__}.get_destination\"\n\n metadata[\"destination\"] = metadata[\"full_clipname\"].replace(\n f\"/{app.capture_folder_name}/\", f\"/{app.destination_folder_name}/\"\n )\n\n if path.exists(metadata[\"destination\"]):\n metadata[\"renamed\"] = \"True\"\n print(f\"file exists:{metadata['destination']}\")\n directory = path.dirname(metadata[\"destination\"])\n filename = path.splitext(path.basename(metadata[\"destination\"]))[0]\n extension = path.splitext(path.basename(metadata[\"destination\"]))[1]\n number = 1\n dest = path.join(directory, f\"{filename}-{number:0>2d}{extension}\")\n metadata[\"destination\"] = dest\n print(f\"Will now test with:{dest}\")\n while path.exists(dest):\n number += 1\n dest = path.join(directory, f\"{filename}-{number:0>2d}{extension}\")\n print(f\"Will now test with:{dest}\")\n\n metadata[\"destination\"] = dest\n\n post_event(\n \"log_info\",\n f\"{func}\",\n f\"The destination is: {metadata['destination']} - was renamed: {metadata['renamed']}\",\n )\n\n return metadata", "def configure_devbox():\n\n # Does a devbox folder already exist?\n is_present = os.path.isdir(Settings.devbox_folder)\n\n # If so, alert the user and exit.\n if is_present:\n message1 = \"This folder is already initialized.\"\n message2 = \"To re-initialize, \"\n message2 += \"run '\" + Settings.program + \" destroy' first.\"\n Utilities.log(message1)\n Utilities.log(message2)\n exit(1)\n\n # Create the config folder in the current directory.\n Utilities.create_folder(Settings.devbox_folder)\n\n # Copy config files.\n source_folder = Settings.source_config\n dest_folder = Settings.config_folder\n folder_copied = Utilities.copy_folder(source_folder, dest_folder)\n\n # Report any errors.\n if not folder_copied:\n Utilities.log(\"Could not copy config files.\")\n exit(1)\n\n # Copy the Vagrantfile to the root of the devbox folder.\n source_file = Settings.config_folder + \"/vagrant/Vagrantfile\"\n dest_location = Settings.devbox_folder\n file_copied = Utilities.copy_file(source_file, dest_location)\n\n # Report any errors.\n if not file_copied:\n Utilities.log(\"Could not copy Vagrantfile.\")\n exit(1)", "def set(self):\n \n ffmpeg_installed = misc.askquestion(DialogTitle='FFMPEG Check',\n Question='Is FFMPEG installed?')\n \n if ffmpeg_installed:\n ffmpeg_dir = misc.get_dir(DialogTitle='Please select the directory where FFMPEG (binary) is installed:')\n \n if sys.platform=='win32':\n self.ffmpeg = os.path.join(ffmpeg_dir, 'ffmpeg.exe')\n self.ffplay = os.path.join(ffmpeg_dir, 'ffplay.exe')\n else:\n self.ffmpeg = os.path.join(ffmpeg_dir, 'ffmpeg')\n self.ffplay = 
os.path.join(ffmpeg_dir, 'ffplay')\n \n if not os.path.exists(self.ffmpeg):\n print('Sorry, {0} does not exist!'.format(self.ffmpeg))\n return\n \n if not os.path.exists(self.ffplay):\n print('Sorry, {0} does not exist!'.format(self.ffplay))\n return\n \n else:\n self.ffmpeg = None\n self.ffplay = None\n \n # Save them to the default config file\n info = {'ffmpeg':self.ffmpeg, 'ffplay': self.ffplay}\n try:\n with open(self.config_file, 'w') as outFile:\n json.dump(info, outFile)\n print('Config information written to {0}'.format(os.path.abspath(self.config_file)))\n except PermissionError as e:\n curDir = os.path.abspath(os.curdir)\n print('Current directory: {0}'.format(curDir))\n print('Error: {0}'.format(e))\n \n return", "def create_output_directory_for_resized_images():\n\n try:\n if not os.path.isdir(RESIZED_NEGATIVE_PATH):\n return os.makedirs(RESIZED_NEGATIVE_PATH)\n elif not os.path.isdir(RESIZED_POSITIVE_PATH):\n return os.makedirs(RESIZED_POSITIVE_PATH)\n except OSError as e:\n print('Error --> {}'.format(e))", "def task():\n if os.path.isdir(orig):\n for fP in [ fP for fP in glob.glob(os.path.join(orig, '*-*/*')) if \\\n os.path.isdir(fP) ]:\n if not os.path.exists(dest + fP[len(orig):]):\n os.makedirs(dest + fP[len(orig):])\n for fP in [ fP for fP in glob.glob(os.path.join(orig, '*-*/*/%s.log' %fmt.get_date())) if \\\n os.path.isfile(fP) ]:\n convert(fP, dest + fP[len(orig):])", "def init_image_info():\n if not os.path.exists(UPLOAD_FOLDER):\n os.makedirs(UPLOAD_FOLDER)", "def preprocess(config_path, save_dir):\r\n \r\n cfg = mmcv.Config.fromfile(config_path)\r\n cfg.merge_from_dict({\r\n 'model.test_cfg.mode': 'slide', \r\n 'model.test_cfg.crop_size': (512, 512), \r\n 'model.test_cfg.stride': (384, 384)\r\n })\r\n\r\n # build the dataloader\r\n dataset = build_dataset(cfg.data.test)\r\n data_loader = build_dataloader(\r\n dataset,\r\n samples_per_gpu=1,\r\n workers_per_gpu=cfg.data.workers_per_gpu,\r\n dist=False,\r\n shuffle=False)\r\n\r\n # slide window and crop image\r\n cnt = 0\r\n for data in tqdm.tqdm(data_loader, desc=\"Processing\"):\r\n img = data['img'][0]\r\n img_meta = data['img_metas'][0].data[0]\r\n cnt += slide_crop(img, img_meta, cfg.model, save_dir)\r\n print(f'Preprocess finished, {cnt} binary files generated.')", "def raw(self, out, config, size, **filters):\r\n with vm2vm.raw.RawImage(out, \"w\") as img:\r\n img.mkfs(size)\r\n with vm2vm.raw.Mountpoint(img.name) as mnt:\r\n self.copy(dest=mnt, config=config, **filters)", "def make(config):\n # Create child folders\n for func in (create_basic_structure,\n copy_resources,\n copy_databases,\n copy_libraries,\n copy_security,\n copy_app_actions,\n copy_pages,\n create_application_info_file,\n replace_all_guids):\n\n INFO(\"\")\n INFO(\"+\"*70)\n INFO(\"\")\n func(config)", "def generate_video_from_frames(path_to_frames, title):\r\n mean_height = 0\r\n mean_width = 0\r\n num_of_images = load_one_setting(settings_filename, 'MAX_CYCLES')\r\n os.chdir(path_to_frames)\r\n '''Loading all frames'''\r\n for file in os.listdir('.'):\r\n if file.endswith(\".jpg\") or file.endswith(\".jpeg\") or file.endswith(\"png\") or file.endswith(\"JPEG\"):\r\n im = Image.open(file)\r\n width, height = im.size\r\n mean_width += width\r\n mean_height += height\r\n\r\n mean_width = int(mean_width / num_of_images)\r\n mean_height = int(mean_height / num_of_images)\r\n\r\n for file in os.listdir('.'):\r\n if file.endswith(\".jpg\") or file.endswith(\".jpeg\") or file.endswith(\"png\") or file.endswith(\"JPEG\"):\r\n im = 
Image.open(file)\r\n imResize = im.resize((mean_width, mean_height), Image.ANTIALIAS)\r\n imResize.save(file, 'JPEG', quality=95)\r\n release_video(title)\r\n os.chdir(r'../..')", "def main(argv):\n args = parse_command_line(argv)\n return convert_chunks_to_jpeg(args.raw_chunks_dir,\n jpeg_quality=args.jpeg_quality,\n slicing_plane=args.slicing_plane) or 0", "def create_links(list_of_paths, dest_dir, print_cfg_ipol=False):\n ms = False\n for i, f in enumerate(list_of_paths):\n\n if isinstance(f, tuple): # we have the ms image\n # tif ms\n ms = True\n symlink_p(f[1], os.path.join(dest_dir, 'im_ms_%02d.tif' % (i+1)))\n\n # preview ms\n tmp = copy_file_matching_pathname('PREVIEW_*.JPG', os.path.dirname(f[1]), dest_dir)\n if tmp:\n symlink_p(tmp, os.path.join(dest_dir, 'prv_%02d.jpg' % (i+1)))\n # enhance contrast\n # os.system(\"/home/carlo/code/s2p/bin/qauto %s %s\" % (tmp, tmp)\n else:\n print('MS PREVIEW not found for %s' % f[1], file=sys.stderr)\n f = f[0] # the path to ms preview is not needed anymore\n\n # pan preview (if no ms preview)\n if not os.path.isfile(os.path.join(dest_dir, 'prv_%02d.jpg' % (i+1))):\n tmp = copy_file_matching_pathname('PREVIEW_*.JPG', os.path.dirname(f), dest_dir)\n if tmp:\n symlink_p(tmp, os.path.join(dest_dir, 'prv_%02d.jpg' % (i+1)))\n # os.system(\"/home/carlo/code/s2p/bin/qauto %s %s\" % (tmp, tmp))\n else:\n print('PAN PREVIEW not found for %s' % f, file=sys.stderr)\n\n # dim\n tmp = copy_file_matching_pathname('DIM_*.XML', os.path.dirname(f), dest_dir)\n if tmp:\n symlink_p(tmp, os.path.join(dest_dir, 'dim_%02d.xml' % (i+1)))\n\n # rpc\n tmp = copy_file_matching_pathname('RPC_*.XML', os.path.dirname(f), dest_dir)\n if tmp:\n symlink_p(tmp, os.path.join(dest_dir, 'rpc_%02d.xml' % (i+1)))\n\n # tif panchro\n symlink_p(f, os.path.join(dest_dir, 'im_panchro_%02d.tif' % (i+1)))\n\n # dzi 8 bits\n dzi8_found = False\n dzi8 = '%s_8BITS.dzi' % f[:-8] # remove extension '.JP2.TIF' (8 chars)\n files8 = '%s_8BITS_files' % f[:-8]\n if os.path.isfile(dzi8) and os.path.isdir(files8):\n symlink_p(dzi8, os.path.join(dest_dir, 'im_panchro_8BITS_%02d.dzi' % (i+1)))\n symlink_p(files8, os.path.join(dest_dir, 'im_panchro_8BITS_%02d_files' % (i+1)))\n dzi8_found = True\n\n # dzi 16 bits\n dzi16_found = False\n dzi16 = '%s_16BITS.dzi' % f[:-8] # remove extension '.JP2.TIF' (8 chars)\n files16 = '%s_16BITS_files' % f[:-8]\n if os.path.isfile(dzi16) and os.path.isdir(files16):\n symlink_p(dzi16, os.path.join(dest_dir, 'im_panchro_16BITS_%02d.dzi' % (i+1)))\n symlink_p(files16, os.path.join(dest_dir, 'im_panchro_16BITS_%02d_files' % (i+1)))\n dzi16_found = True\n\n # print warning if neither 8bit nor 16bit dzi was found\n if (not dzi8_found) and (not dzi16_found):\n print('WARNING: no dzi file found for img %s' % f, file=sys.stderr)\n\n if print_cfg_ipol:\n print_cfg.main(dest_dir, len(list_of_paths), ms)", "def generate_clips(input_dir, output_dir, duration=20, ext='.mkv'):\n \n i = [0,0,0,0]\n \n output_dirs = [os.path.join(output_dir, 'goals'),\n os.path.join(output_dir, 'bg'),\n os.path.join(output_dir, 'cards'),\n os.path.join(output_dir, 'subs')]\n\n for dir in output_dirs:\n if not os.path.exists(dir):\n os.mkdir(dir)\n print('Made directory ' + dir)\n \n \n for path in input_dir:\n print(path)\n \n with open(os.path.join(path, 'Labels.json')) as f:\n file = json.load(f)\n labels = parseLabels(file)\n \n # for each item in label, crop a specified length and save to output_dir\n for timestamp, label in labels:\n half = timestamp[0]\n time = timestamp[1]\n vid_name = 
os.path.join(path, half + ext)\n\n if time - 5 > 0:\n\n if label == 'soccer-ball':\n # collect an instance of a goal\n clip_video(vid_name, time - 5, duration, i[0], output_dirs[0])\n i[0] += 1\n\n # collect an instance of a non-goal, if it does not overlap with another event\n if event_overlap(labels, half, time - 45, duration) == False and time - 45 > 0:\n clip_video(vid_name, time - 45, duration, i[1], output_dirs[1])\n i[1] += 1\n\n elif 'card' in label:\n # collect an instance of a carding event\n clip_video(vid_name, time - 4, duration, i[2], output_dirs[2])\n i[2] += 1\n\n if event_overlap(labels, half, time - 45, duration) == False and time - 45 > 0:\n clip_video(vid_name, time - 45, duration, i[1], output_dirs[1])\n i[1] += 1\n\n\n elif 'substitution' in label:\n # collect an instance of a carding event\n clip_video(vid_name, time - 4, duration, i[3], output_dirs[3])\n i[3] += 1\n\n if event_overlap(labels, half, time - 45, duration) == False and time - 45 > 0:\n clip_video(vid_name, time - 45, duration, i[1], output_dirs[1])\n i[1] += 1\n\n print('Saved clip from ' + path)", "def _prepare(self):\n logging.info('-> copy configuration...')\n path_cofig = self.params['path_config_bUnwarpJ']\n shutil.copy(path_cofig, os.path.join(self.params['path_exp'],\n os.path.basename(path_cofig)))\n if 'path_config_IJ_SIFT' in self.params:\n path_cofig = self.params['path_config_IJ_SIFT']\n shutil.copy(path_cofig, os.path.join(self.params['path_exp'],\n os.path.basename(path_cofig)))\n if 'path_config_IJ_MOPS' in self.params:\n path_cofig = self.params['path_config_IJ_MOPS']\n shutil.copy(path_cofig, os.path.join(self.params['path_exp'],\n os.path.basename(path_cofig)))", "def thumbnail_path(instance, filename):\n\n username = instance.user.username\n mainpath = os.path.join(\"infocomp\",username,\"thumbnails\",filename)\n return mainpath", "def __main__() :\n try :\n poly = Polyhedre(sys.argv[1])\n \n name = sys.argv[2]\n \n createAllFiles(poly, name)\n\n createAllImageFiles(poly, name)\n \n except FileNotFoundError :\n print(\"Use an existing file\")", "def mix_audio_and_video(self):\n\t\tself._logger.info('Starting ffmpeg...')\n\t\tsp.run('ffmpeg -v 0 -i {0} -i {1} -c:v copy '\n\t\t '-c:a aac -strict experimental {2}'\n\t\t .format(self.TEMP_FOLDER + self._video_manager.output_file_name,\n\t\t self.TEMP_FOLDER + self._audio_manager.output_file_name,\n\t\t self.SAVE_FOLDER + self._video_manager.output_file_name))\n\t\tself._logger.info('Save done. 
Output file: {}'\n\t\t .format(self._video_manager.output_file_name))\n\t\tself._logger.info('Cleaning the temp folder...')\n\t\tfor file in glob.glob(self.TEMP_FOLDER + '*'):\n\t\t\tos.remove(file)", "def take_snapshot(self, path, file_name):\n pipelie_state = self.player.get_state(1)\n p_state = pipelie_state.state\n if p_state not in (Gst.State.PLAYING, Gst.State.PAUSED):\n print(\"Stream is not ready\")\n else:\n try:\n sink = self.player.get_by_name('sink')\n sample = GstBase.BaseSink.get_last_sample(sink)\n image_buffer = Gst.Sample.get_buffer(sample)\n buffer_map = Gst.Buffer.map(image_buffer, Gst.MapFlags.READ)\n image_binary_data = bytearray(buffer_map.info.data)\n utils.store_image(image_binary_data, path, file_name + \".jpeg\")\n except:\n print(\"Capturing image failed.\")", "def main(raw_dir,save_dir,ext,target_size):\r\n try:\r\n msg = \"--target-size must be a tuple of 2 integers\"\r\n assert isinstance(target_size, tuple) and len(target_size) == 2, msg\r\n fnames = glob.glob(os.path.join(raw_dir, \"*.{}\".format(ext)))\r\n os.makedirs(save_dir, exist_ok=True)\r\n print(\"{} files to resize from directory `{}` to target size:{}\".format(len(fnames), raw_dir, target_size))\r\n for i, fname in enumerate(fnames):\r\n print(\".\", end=\"\", flush=True)\r\n img = cv2.imread(fname)\r\n img_small = cv2.resize(img, target_size)\r\n new_fname = \"{}.{}\".format(str(i), ext)\r\n small_fname = os.path.join(save_dir, new_fname)\r\n cv2.imwrite(small_fname, img_small)\r\n print(\"\\nDone resizing {} files.\\nSaved to directory: `{}`\".format(len(fnames), save_dir))\r\n except Exception as ex:\r\n print('Exception:',ex)", "def main():\n global MASK\n start_time = time()\n parser = initArgparse()\n args = parser.parse_args()\n dirtree = args.directorytree\n filetree = args.filetree\n meta = args.metadata\n newmeta = args.newmetadata\n sfv = args.sfv\n yes = args.yes\n MASK = args.exclude\n\n for i in args.DIRECTORY:\n if Path(i).exists() is True:\n basepath = Path(i)\n else:\n raise NotADirectoryError(f\"{i} does not exist\")\n default = False\n if dirtree == sfv == filetree == meta == newmeta is False:\n default = True\n if dirtree is True or default is True:\n dirtree_file = f\"{basepath.name}_directory_tree.txt\"\n checkFileExists(basepath, dirtree_file, yes)\n createDirectoryTree(basepath, dirtree_file)\n if sfv is True or default is True:\n sfv_file = f\"{basepath.name}.sfv\"\n checkFileExists(basepath, sfv_file, yes)\n createSfv(basepath, sfv_file)\n if filetree is True or default is True:\n csvtree_file = f\"{basepath.name}_file_tree.csv\"\n jsontree_file = f\"{basepath.name}_file_tree.json\"\n checkFileExists(basepath, jsontree_file, yes)\n checkFileExists(basepath, csvtree_file, yes)\n createFileTree(basepath, jsontree_file, csvtree_file)\n if meta is True or default is True:\n metadata_file = f\"{basepath.name}_metadata.json\"\n checkFileExists(basepath, metadata_file, yes)\n createMetadata(basepath, metadata_file)\n if newmeta is True:\n createNewMetadata(basepath)\n filesCache.cache_clear()\n getFileInfo.cache_clear()\n killTika()\n\n stop_time = time()\n print(f\"Finished in {round(stop_time-start_time, 2)} seconds\")", "def _queue_photos(self):\n global filtering_queue\n global number_of_pictures\n\n number_of_pictures = 0\n\n for root, dirs, files in walk(curdir):\n for file in files:\n if dirs == \"thumb\" or dirs == \"filtered\":\n pass\n else:\n file_path = join(root, file)\n filtering_data = [file_path, curdir]\n filtering_queue.put(filtering_data)\n number_of_pictures 
+= 1\n print(\"Queued:\", file_path)\n\n try:\n mkdir(join(curdir, \"thumb\"))\n except FileExistsError:\n pass\n try:\n mkdir(join(curdir, \"thumb\", \"Alexander\"))\n except FileExistsError:\n pass\n try:\n mkdir(join(curdir, \"thumb\", \"Bjarke\"))\n except FileExistsError:\n pass\n try:\n mkdir(join(curdir, \"thumb\", \"Gabrielle\"))\n except FileExistsError:\n pass\n try:\n mkdir(join(curdir, \"thumb\", \"Monica\"))\n except FileExistsError:\n pass\n try:\n mkdir(join(curdir, \"thumb\", \"Wenche\"))\n except FileExistsError:\n pass\n try:\n mkdir(join(curdir, \"filtered\"))\n except FileExistsError:\n pass", "def extract_frames_from_directory(count, source, destination):\n all_videos = os.listdir(source)\n print(all_videos)\n\n for video in all_videos:\n video_file = source + video # Retrieve a video from the OverHeadPress\n cap = cv2.VideoCapture(video_file) # capturing the video from the given path\n dim = (224, 224)\n\n while cap.isOpened():\n frame_id = cap.get(1) # current frame number\n ret, frame = cap.read()\n if not ret:\n break\n\n # We are capturing at 28 frames per second. \n # If we want to capture every 0.2 seconds we will take every 5 frames\n if frame_id % 8 == 0:\n filename =\"frame%d.jpg\" % count\n count+=1\n resized = cv2.resize(frame, dim)\n cv2.imwrite(destination + filename, resized)\n\n cap.release()\n print (\"Finished processing: \" + video + \". Ended at video: \" + str(count))", "def moveFiles(rootDir):\n\n homedir = os.environ['HOME']\n albumDirec = 'AlbumCoverImages'\n #Check if a directory exists\n if not os.path.isdir(os.path.join(homedir, 'Pictures', albumDirec)):\n print('AlbumCoverImages not found, trying to make...')\n os.makedirs(os.path.join(homedir, 'Pictures', albumDirec))\n \n for root, dirs, files in os.walk(rootDir, topdown=False):\n #print('testtest')\n for name in files:\n \n\n #Find image files, and move them to albumCoverImages\n #For some bullshit reason or statments won't work here, have to\n # parse this out to elif statements, ughhhh...\n \n if '.jpg' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec)))\n \n elif '.png' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec, name)))\n \n elif '.gif' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec, name)))\n \n elif '.pdf' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec, name)))\n\n else:\n try:\n #Use tinytag to get file metadata\n tag = TinyTag.get(os.path.join(root, name))\n artistName = tag.artist\n albumName = tag.album\n \n #TODO: Need to add more conditions\n if isinstance(artistName, str):\n artistName = artistName.replace('/', '_')\n\n elif isinstance(albumName, str):\n albumName.replace('/', '_')\n \n\n #Check if the artists directory exists, if not make it\n try:\n if not os.path.isdir(os.path.join(rootDir, artistName)):\n os.makedirs(os.path.join(rootDir, artistName))\n print('{0} directory made!'.format(artistName))\n \n except ValueError:\n print('ValueError with {0}'.format(root+'/'+name))\n continue\n\n except TypeError:\n 
print('TypeError with {0}'.format(root+'/'+name))\n continue\n\n #Check if the songs album exists, if not make it\n try:\n if not os.path.isdir(os.path.join(rootDir, artistName, albumName)):\n os.makedirs(os.path.join(rootDir, artistName, albumName))\n print('{0} directory made!'.format(albumName))\n \n except TypeError:\n print('TypeError with {0}! Look at album directory making.'.format(root+'/'+name))\n continue\n\n #TODO: Check if album is in artist direc, if not, move it\n\n #Check if song is in album, if not move it \n try:\n if os.path.isfile(os.path.join(rootDir, artistName, albumName, name)) == False:\n os.rename(os.path.join(root, name), os.path.join(rootDir, artistName, albumName, name))\n print('{0} moved to {1}!'.format(name, albumName))\n \n except TypeError:\n print('TypeError with file {0}! Look at line song moving'.format(root+'/'+name))\n continue\n \n #TODO: Check if this part works\n except LookupError:\n if (\".jpg\") or (\".png\") or (\".7z\") or (\"README\") or (\".zip\") in name:\n continue\n \n else:\n print('No reader support for {0}'.format(name))\n continue", "def main_convert():\n\n verbose = True\n\n # Build parser.\n parser = argparse.ArgumentParser()\n\n parser.add_argument('fname_pattern', action='store', help='File name pattern')\n parser.add_argument('-R', '--recursive', action='store_true', default=True,\n help='Search several subdirectories')\n\n # Run parser, extract arguments.\n args = parser.parse_args()\n\n # List of files.\n pattern = os.path.normpath(unicode(args.fname_pattern))\n\n if os.path.isdir(pattern):\n pattern = os.path.join(pattern, '*')\n fname_list = glob.glob(pattern)\n\n pattern = os.path.join(pattern, '*')\n fname_list.extend(glob.glob(pattern))\n\n pattern = os.path.join(pattern, '*')\n fname_list.extend(glob.glob(pattern))\n\n pattern = os.path.join(pattern, '*')\n fname_list.extend(glob.glob(pattern))\n\n else:\n fname_list = glob.glob(pattern)\n\n to_be_removed = []\n for f in fname_list:\n if os.path.isdir(f):\n to_be_removed.append(f)\n\n for f in to_be_removed:\n fname_list.remove(f)\n\n # Do the work.\n num_files = len(fname_list)\n for k, f_src in enumerate(fname_list):\n f_src = os.path.abspath(f_src)\n\n b_src, e = os.path.splitext(f_src)\n\n folder = os.path.basename(os.path.dirname(f_src))\n if (e == '.mp3' or e == '.wma' or e == '.wav' or e == '.aiff') and b_src != 'tmp' and folder != '.audio_convert':\n\n if verbose:\n try:\n print('%3d/%d: [%s -> .m4a] %s' % (k, num_files, e, os.path.basename(b_src)))\n except Exception as e:\n val = repr(f_src)\n raise Exception('Problem processing file: %s' % val)\n\n # Temporary working copy.\n path_work = os.path.dirname(f_src)\n f_tmp_src = os.path.join(path_work, 'tmp' + e)\n shutil.copy(f_src, f_tmp_src)\n\n # Transcode file format.\n f_tmp_dst = convert(f_tmp_src, verbose=verbose)\n\n # Finish.\n b_tmp_dst, e_dst = os.path.splitext(f_tmp_dst)\n\n f_dst = b_src + e_dst\n if os.path.isfile(f_dst):\n os.remove(f_dst)\n os.rename(f_tmp_dst, f_dst)\n\n if os.path.isfile(f_tmp_src):\n os.remove(f_tmp_src)\n\n if os.path.isfile(f_dst):\n move_processed_file(f_src)\n\n # Done.", "def main(args):\n\n for dir in args.dirs:\n # prepdir = mdssprep.Directory(dir,exclude=['file_*3*','file_2??'],include=['file_*5*'],maxarchivesize=mdssprep.one_meg*200.,minsize=mdssprep.one_meg*100.)\n prepdir = mdssprep.Directory(dir)\n prepdir.archive(dryrun=False)", "def _copy_otto_files(self):\n\n # Copy files used by the container\n # Substitute name of the container in the configuration file.\n 
lxcdefaults = os.path.join(utils.get_base_dir(), \"lxc.defaults\")\n with open(os.path.join(lxcdefaults, \"config\"), 'r') as fin:\n with open(os.path.join(self.containerpath, \"config\"), 'w') as fout:\n for line in fin:\n lineout = line\n if \"${NAME}\" in line:\n lineout = line.replace(\"${NAME}\", self.name)\n elif \"${ARCH}\" in line:\n lineout = line.replace(\"${ARCH}\", self.arch)\n fout.write(lineout)\n\n dri_exists = os.path.exists(\"/dev/dri\")\n vga_device = utils.find_vga_device()\n with open(os.path.join(lxcdefaults, \"fstab\"), 'r') as fin:\n with open(os.path.join(self.containerpath, \"fstab\"), 'w') as fout:\n for line in fin:\n if line.startswith(\"/dev/dri\") and not dri_exists:\n lineout = \"# /dev/dri not found, entry disabled (\"\\\n \"do you use nvidia or fglrx graphics \"\\\n \"drivers?)\\n\"\n lineout += \"#\" + line\n else:\n lineout = line\n fout.write(lineout)\n\n src = os.path.join(lxcdefaults, \"scripts\")\n dst = os.path.join(self.containerpath, \"tools\", \"scripts\")\n with ignored(OSError):\n shutil.rmtree(dst)\n shutil.copytree(src, dst)\n utils.set_executable(os.path.join(dst, \"pre-start.sh\"))\n utils.set_executable(os.path.join(dst, \"pre-mount.sh\"))\n utils.set_executable(os.path.join(dst, \"post-stop.sh\"))\n\n src = os.path.join(lxcdefaults, \"guest\")\n dst = os.path.join(self.containerpath, \"tools\", \"guest\")\n with ignored(OSError):\n shutil.rmtree(dst)\n shutil.copytree(src, dst)\n\n # Some graphics need a proprietary driver\n # driver -> packages to install\n drivers = {\n \"fglrx\": \"fglrx\",\n \"fglrx_pci\": \"fglrx\",\n \"nvidia\": \"nvidia-current\"\n }\n if vga_device is not None and \"Driver\" in vga_device:\n if vga_device[\"Driver\"] in drivers:\n logging.info(\"Installing additional drivers for graphics \"\n \"card {}\".format(vga_device[\"Device\"]))\n # l-h-g must be installed to compile additional modules\n pkgs = \"linux-headers-generic {}\\n\".format(\n drivers[vga_device[\"Driver\"]])\n # TODO: this shouldn't be in the guest directory\n pkgsdir = os.path.join(self.containerpath, \"tools\", \"guest\", \"var\", \"local\", \"otto\", \"config\")\n if not os.path.exists(pkgsdir):\n os.makedirs(pkgsdir)\n with open(os.path.join(pkgsdir, \"00drivers.pkgs\"), 'w') as fpkgs:\n logging.debug(\"Custom drivers written to {}\".format(\n os.path.join(pkgsdir, \"00drivers.pkgs\")))\n fpkgs.write(pkgs)", "def move_files():\r\n if \"defaultdirectory\" in data:\r\n print(\"\\n*** Default video source directory:\", data[\"defaultdirectory\"])\r\n srcdir = input(\"Enter '1' to use default video source directory\\n\"\r\n \"Otherwise, please enter the full path where your videos are located.\\n\"\r\n \"Example: C:\\\\user\\\\downloads\\\\ \\n\").strip()\r\n if srcdir.startswith('1'):\r\n srcdir = data[\"defaultdirectory\"]\r\n else:\r\n srcdir = input(\"Please enter the full path where your videos are located.\\n\"\r\n \"Example: C:\\\\user\\\\downloads\\\\ \\n\").strip()\r\n while not os.path.isdir(srcdir):\r\n srcdir = input(\"\\n*** Invalid directory. Please enter the full path where your \"\r\n \"videos are located.\\n Example: C:\\\\user\\\\downloads\\\\ \\n\").strip()\r\n data[\"defaultdirectory\"] = srcdir\r\n save_json()\r\n print()\r\n for filename in os.listdir(srcdir):\r\n # Only look for folders/files in the format \"S##E##\". 
Example: \"X-Files S01E02\"\r\n if re.search('[sS]\\\\d{2}[eE]\\\\d{2}', filename):\r\n found = False\r\n filepath = os.path.join(srcdir, filename)\r\n name = filename.lower()\r\n for key in data:\r\n keywords = key.split()\r\n if all(word in name for word in keywords):\r\n found = True\r\n try:\r\n shutil.move(filepath, data[key])\r\n except Exception as e:\r\n print(\"*** Error with {}\".format(filename))\r\n print(\"The file might be open in another program\")\r\n print(repr(e))\r\n else:\r\n print(\"*** Moved:\", (filename[:41] + \"...\") if len(filename) > 44 else filename)\r\n break\r\n if not found:\r\n # Prints the shows that matched the episode formatting, but were not\r\n # configured to be processed\r\n print(\"* Not processed:\", (filename[:35] + '...') if len(filename) > 38 else filename)", "def cp_dir_or_files(self):\n if self.recursive:\n if self.cmdtype == 'upload' and not self.srcpath.endswith(os.path.sep):\n basename = os.path.basename(self.srcpath)\n self.destpath = join_obs_path(self.destpath, basename)\n elif self.cmdtype == 'download' and not self.srcpath.endswith('/'):\n bucket, key = split_bucket_key(self.srcpath)\n basename = key.split('/')[-1]\n if basename:\n self.destpath = os.path.join(self.destpath, basename)\n elif not self.srcpath.endswith('/'):\n bucket, key = split_bucket_key(self.srcpath)\n basename = key.split('/')[-1]\n if basename:\n self.destpath = join_obs_path(self.destpath, basename)", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dryrun', '-n', action='store_true',\n help=('check TileDB/SSH files differences only, '\n 'does not perform any copy'))\n parser.add_argument('--debug', '-d', action='store_true',\n help=('prints debug messages'))\n parser.add_argument('--tdmq-url', action='store', type=str, required=True,\n dest='tdmq_url',\n help=('tdmq server and path of the form'))\n parser.add_argument('--tdmq-auth-token', action='store', type=str, required=True,\n dest='tdmq_auth_token',\n help=('tdmq server authorization token'))\n parser.add_argument('--ssh-url', action='store', type=str, required=True,\n dest='ssh_url',\n help=(\n 'ssh server and path of the form: '\n '<USER>@<NAME_NODE>:<PORT>/PATH'))\n parser.add_argument('--ssh-key', action='store', type=str, required=True,\n dest='ssh_key',\n help=('key for ssh server authentication'))\n parser.add_argument('--desc-file', action='store', type=str, required=True,\n dest='source_desc_file',\n help=('source descrption file'))\n\n # Only one of --hours and --sync can be provided on command line\n sync_group = parser.add_mutually_exclusive_group()\n sync_group.add_argument('--hours', action='store',\n dest='hours', default=24, type=int,\n help=('uploads only the radar images '\n 'more recent than the given number of hours'))\n sync_group.add_argument('--sync', '-s', action='store_true',\n dest='sync',\n help=('upload all the missing radar images'))\n\n args = parser.parse_args()\n\n # If the debug flag is set, print all messages\n if args.debug:\n logging.basicConfig(\n level=logging.DEBUG,\n format='[%(levelname)s] %(message)s')\n else:\n logging.basicConfig(\n level=logging.INFO,\n format='[%(levelname)s] %(message)s')\n\n logging.getLogger(\"paramiko\").setLevel(logging.WARNING)\n\n (_ssh_username, _ssh_hostname, _ssh_port,\n _ssh_root) = check_ssh_url(args.ssh_url)\n if _ssh_hostname is None:\n logging.error(\n 'Wrong, incomplete or absent SSH path: \\'%s\\'', args.ssh_url)\n sys.exit(1)\n\n if os.path.isfile(args.ssh_key) == False:\n logging.error(\n 'SSH key file 
not found: \\'%s\\'', args.ssh_key)\n sys.exit(1)\n\n if os.path.isfile(args.source_desc_file) == False:\n logging.error(\n 'Source description file not found: \\'%s\\'', args.source_desc_file)\n sys.exit(1)\n\n _source_desc = load_description(args.source_desc_file)\n\n ssh_client = SSHClient(\n username=_ssh_username,\n hostname=_ssh_hostname,\n port=_ssh_port,\n key_file=args.ssh_key,\n root_dir=_ssh_root\n )\n\n _folder_list = ssh_client.list_folder()\n\n def _name_filter(file_name):\n # Is a radar image file\n if re.match(r'cag01est2400\\d{4}-\\d{2}-\\d{2}_\\d{2}:\\d{2}:\\d{2}.png', file_name):\n return True\n else:\n return False\n\n # Filter out not image files\n _image_list = list(filter(_name_filter, _folder_list))\n\n # Instantiates a TDMQ client, retrieves the source if exists or registers a\n # new one\n tdmq_client = Client(args.tdmq_url, args.tdmq_auth_token)\n sources = tdmq_client.find_sources({'id': _source_desc['id']})\n if len(sources) > 0:\n assert len(sources) == 1\n source = sources[0]\n logging.info(f\"Using source {source.tdmq_id} for {source.id}.\")\n else:\n source = tdmq_client.register_source(_source_desc)\n logging.info(f\"Created source {source.tdmq_id} for {source.id}.\")\n\n try:\n ts = source.timeseries()\n times = ts.time\n last_image_time = max(sorted(times))\n _last_slot = max(ts.tiledb_indices)\n except Exception as ex: # FIXME too general\n times = []\n last_image_time = datetime.datetime(1970, 1, 1, 0, 0, 0)\n _last_slot = 0\n\n # Builds the list of file to download\n if args.sync:\n _images_to_ingest = ingest_missings(_image_list, times)\n else:\n start_time = (\n datetime.datetime.now() - datetime.timedelta(hours=args.hours)\n ).replace( minute=0, second=0, microsecond=0)\n\n logging.info(f\"Requested images from {start_time} (last local image is {last_image_time}).\")\n if start_time > last_image_time:\n last_image_time = start_time\n\n _images_to_ingest = ingest_latests(last_image_time, _image_list)\n\n logging.info(\n f\"Remote files: {len(_folder_list)}, remote images: \"\n f\"{len(_image_list)}, images to sync: {len(_images_to_ingest)}.\")\n\n for _image in _images_to_ingest:\n _timestamp = datetime.datetime.strptime(\n _image, 'cag01est2400%Y-%m-%d_%H:%M:%S.png')\n _last_slot = _last_slot + 1\n\n if args.dryrun:\n logging.debug(f\"[DRY-RUN] Ingesting data at time {_timestamp}, slot {_last_slot}.\")\n else:\n logging.debug(f\"Ingesting data at time {_timestamp}, slot {_last_slot}.\")\n _data = fetch_radar_data(ssh_client, _image)\n source.ingest(_timestamp, _data, _last_slot)\n logging.info(f\"Done ingesting.\")", "def get_train_video(opt, frame_path, Total_frames):\n clip = []\n i = 0\n loop = 0\n\n # choosing a random frame\n if Total_frames <= opt.sample_duration: \n loop = 1\n start_frame = 0\n else:\n start_frame = np.random.randint(0, Total_frames - opt.sample_duration)\n \n if opt.modality == 'RGB': \n while len(clip) < opt.sample_duration:\n try:\n im = Image.open(os.path.join(frame_path, '%05d.jpg'%(start_frame+i+1)))\n clip.append(im.copy())\n im.close()\n except:\n print('ERROR no such image {}'.format(os.path.join(frame_path, '%05d.jpg'%(i+1))))\n i += 1\n \n if loop==1 and i == Total_frames:\n i = 0\n\n elif opt.modality == 'Flow': \n while len(clip) < 2*opt.sample_duration:\n try:\n im_x = Image.open(os.path.join(frame_path, 'TVL1jpg_x_%05d.jpg'%(start_frame+i+1)))\n im_y = Image.open(os.path.join(frame_path, 'TVL1jpg_y_%05d.jpg'%(start_frame+i+1)))\n clip.append(im_x.copy())\n clip.append(im_y.copy())\n im_x.close()\n 
im_y.close()\n except:\n pass\n i += 1\n \n if loop==1 and i == Total_frames:\n i = 0\n \n elif opt.modality == 'RGB_Flow':\n while len(clip) < 3*opt.sample_duration:\n try:\n im = Image.open(os.path.join(frame_path, '%05d.jpg'%(start_frame+i+1)))\n im_x = Image.open(os.path.join(frame_path, 'TVL1jpg_x_%05d.jpg'%(start_frame+i+1)))\n im_y = Image.open(os.path.join(frame_path, 'TVL1jpg_y_%05d.jpg'%(start_frame+i+1)))\n clip.append(im.copy())\n clip.append(im_x.copy())\n clip.append(im_y.copy())\n im.close()\n im_x.close()\n im_y.close()\n except:\n pass\n i += 1\n \n if loop==1 and i == Total_frames:\n i = 0\n return clip", "def texlive_install():\n orDir = os.getcwd()\n system = platform.system()\n if system == 'Windows':\n print('A Windows system has been detected.')\n default_dir = '/texlive/'\n elif system == 'Linux':\n print('A Linux system has been detected.')\n default_dir = '/usr/share/texlive'\n else:\n print(\"System '{}' is not known. Aborting.\".format(system))\n raise SystemExit()\n\n print('Looking for configuration file...')\n # If available, the default root path is used\n if os.path.isdir(default_dir):\n os.chdir(default_dir)\n else:\n os.chdir('/')\n # Then we look for the config file\n for root, dirs, files in os.walk(os.getcwd()):\n if 'texmf.cnf' in files:\n filePath = os.path.join(root,'texmf.cnf')\n print('Configuration file found at {}'.format(filePath))\n break\n if filePath is None:\n print('Warning: no configuration file has been found. Continuing with default settings...')\n filePath = default_dir\n\n # And now for the actual local texmf folder, which is created if not already there\n os.chdir(os.path.join(root,'..'))\n texmf_dir = os.path.join(os.getcwd(),'texmf-local')\n f_chdir(texmf_dir)\n new_dir = 'tex'\n f_chdir(new_dir)\n new_dir = 'latex'\n f_chdir(new_dir)\n\n # template files are copied\n new_dir = 'imta'\n f_chdir(new_dir)\n templatePath = os.getcwd()\n copy_source_files(orDir,templatePath)\n\n \n output = subprocess.check_output(\"tlmgr conf auxtrees add {}\".format(texmf_dir), \n shell=True).decode('utf-8')\n \n print('Configuration file updated')\n # original directory is restored\n f_chdir(orDir)", "def copy_facemap_roi(procfile, videofile, outputfile=None):\n videodata = np.load(procfile, allow_pickle=True).item() \n videodata['filenames'] = [[videofile]]\n if outputfile is None:\n outputfile = os.path.splitext(videofile)[0]+'_proc.npy'\n if os.path.isfile(outputfile):\n print(f'File {outputfile} exists. 
It will not be overwritten.')\n return None\n np.save(outputfile, videodata)\n return outputfile", "def process_videos(chapter_info):\n\n print(\"Processing chapter_info:\", chapter_info)\n\n # getting creation time of the first chapter\n # TODO update when adding multiple directory proccessing\n os.chdir(DIR_VIDEO_FILES)\n print(\"1st chapter\", chapter_info[1][0])\n chap1_time = time.strftime(\n r\"%Y-%m-%d_%H-%M\", time.localtime(os.path.getctime(chapter_info[1][0])))\n print(\"1st chapter creation\", chap1_time)\n\n # output_file = f\"M_GH00{chapter_info[0]}_{chap1_time}.MP4\"\n output_file = f\"{chap1_time}_GH00{chapter_info[0]}_MRG.MP4\"\n if os.path.isfile(output_file):\n print(f\"Chapter already processed, found file: {output_file}\")\n return\n\n # preparing text file containing file list for merging (for ffmpeg)\n video_list_file = chapter_info[0] + \"_merge.txt\"\n with open(video_list_file, \"w\") as f:\n for video_chapter in chapter_info[1]:\n f.write(f\"file {video_chapter}\\n\")\n\n command = f\"{FFMPEG_EXE} -f concat -i {video_list_file} -c copy {DIR_OUTPUT}{output_file}\"\n print(\"command =\", command)\n # p = subprocess.run(\"dir\", shell=True, capture_output=True)\n # p = subprocess.run(\"dir\", shell=True, stdout=subprocess.PIPE, text=True)\n p = subprocess.run(command, stdout=subprocess.PIPE, text=True)\n print(\"returncode =\", p.returncode)\n # print(\"stdout =\", p.stdout)\n os.remove(video_list_file) # remove file list after merging\n # rename original chapters after processing\n for video_chapter in chapter_info[1]:\n os.rename(video_chapter, f\"OK_{video_chapter}\")", "def main():\n parser = argparse.ArgumentParser(description=\"Tracks adult fish\")\n # add options for argument parser\n parser.add_argument(\"in_path\",\n help=\"Path to the video directory.\")\n parser.add_argument(\"out_path\",\n help=\"Directory for results. 
Should be empty.\")\n parser.add_argument(\"-x\", \"--keep_temp\", action=\"store_true\",\n help=\"Keep temporary folder after execution.\")\n parser.add_argument(\"--visual\", action=\"store_true\",\n help=\"shows a visual representation of the tracking progress.\")\n\n # parse arguments from command line\n args = parser.parse_args()\n # get all file names and directories ready\n out_dir, temp_dir, video_bases, videos = housekeeping(args)\n borders = []\n for i in range(len(videos)):\n v = videos[i]\n get_borders(borders, temp_dir, v)\n\n for i in range(len(videos)):\n vbn = video_bases[i]\n v = videos[i]\n scaled_video = \"scaled_\" + vbn + \".avi\"\n ffmpeg = Ffmpeg(v, os.path.join(temp_dir, scaled_video))\n ffmpeg.f = \"avi\"\n ffmpeg.vcodec = \"libx264rgb\"\n ffmpeg.width = 480\n ffmpeg.run()\n\n for i in range(len(videos)):\n vbn = video_bases[i]\n pts = tracker(args, temp_dir, vbn)\n border = borders[i]\n tracks_lower, tracks_upper = split_tracks(border, pts)\n analysis = Analysis(tracks_lower, tracks_upper, px_size=0.06)\n analysis.analyze(os.path.join(out_dir, 'stats.txt'), vbn, vel=True)\n\n if not args.keep_temp:\n shutil.rmtree(temp_dir)", "def fileCopyToMorph():\r\n print(str(self.copyFilePath))\r\n print(str(self.morphPath))\r\n \"\"\"copyPath = self.createDir + self.name + \"-\" + self.method\r\n print(str(copyPath))\r\n \r\n os.system(copyPath)\"\"\"\r\n os.system(self.copyFilePath)\r\n print(\"Burada sorun yok\")", "def seperate_dog_cat(src, dst):\n imgs = [f for f in os.listdir(src) if os.path.isfile(os.path.join(src, f)) and not f.startswith('.')]\n \n dst_dog = os.path.join(dst, 'dog')\n dst_cat = os.path.join(dst, 'cat')\n if not os.path.exists(dst_dog):\n os.makedirs(dst_dog)\n if not os.path.exists(dst_cat):\n os.makedirs(dst_cat)\n \n for img in imgs:\n if 'dog' in img:\n move(os.path.join(src, img), dst_dog)\n if 'cat' in img:\n move(os.path.join(src, img), dst_cat)\n print('seperate done')", "def form_sample_folder(self, input_folder, target_folder, sample_name):\n print(f'processing {sample_name} folder.')\n # first make a subfolder to contain the images - e.g. 'target_folder/sample_name'\n sample_dir = join(target_folder, sample_name)\n if not os.path.exists(sample_dir):\n mkdir(sample_dir)\n # resize and move the mask images - e.g. 
'target_folder/sample_name/imgs_necrosis.png'\n img_file_nec = join(input_folder, 'Necrosis',\n 'Tissue Slides.'+sample_name+'.png')\n img_res = self.process_img(img_file_nec, self.rescale_ratio)\n img_nec = img_res.copy()\n cv2.imwrite(join(sample_dir, 'necrosis.png'), img_res)\n\n img_file_perf = join(input_folder, 'Perfusion',\n 'Tissue Slides.'+sample_name+'.png')\n img_res = self.process_img(img_file_perf, self.rescale_ratio)\n cv2.imwrite(join(sample_dir, 'perfusion.png'), img_res)\n\n # resize and move the maker HE and EF5 images\n files = listdir(input_folder)\n img_files = [x for x in files if x.split(\n '.')[-1] in ('tif', 'jpg', 'png')]\n for img_file in img_files:\n if (sample_name+'_' in img_file) or (sample_name+'-' in img_file):\n if ('HE-G' in img_file) or ('HE-green' in img_file) or ('HEgreen' in img_file):\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n if not os.path.exists(join(sample_dir, 'HE-green.png')):\n cv2.imwrite(join(sample_dir, 'HE-green.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n elif ('HE-R' in img_file) or ('HE-red' in img_file) or ('HEred' in img_file):\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n if not os.path.exists(join(sample_dir, 'HE-red.png')):\n cv2.imwrite(join(sample_dir, 'HE-red.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n elif ('HE-B' in img_file) or ('HE-blue' in img_file) or ('HE-blue' in img_file):\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n if not os.path.exists(join(sample_dir, 'HE-blue.png')):\n cv2.imwrite(join(sample_dir, 'HE-blue.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n elif 'EF5' in img_file:\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n img_ef5 = img_res.copy()\n if not os.path.exists(join(sample_dir, 'EF5.png')):\n cv2.imwrite(join(sample_dir, 'EF5.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n\n masked_ef5 = (img_ef5 * (img_nec <= 0)).astype(img_ef5.dtype)\n cv2.imwrite(join(sample_dir, 'EF5_masked.png'), masked_ef5)\n assert len(listdir(sample_dir)) == 7\n return", "def gen_thumb(video_path, thumb_path):\n if os.path.isfile(thumb_path):\n os.remove(thumb_path)\n\n global THUMB_SIZE\n cmd = ['ffmpeg', '-itsoffset', '-5', '-i', video_path, '-vframes', '1', '-f', 'apng', '-s', THUMB_SIZE, thumb_path]\n p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n output = p.communicate()[1]\n\n duration = search_duration_from_text(output)\n if not duration:\n tlog = get_logger(current_thread().name)\n tlog.error(\"Failed to find duration for {0}\".format(video_path))\n duration = 0\n\n return p.returncode == 0, duration", "def upload():\n run('mkdir -p /srv/images/'+env.project_name+'/')\n rsync_project(\n env.project_dir, './',\n exclude=(\n '.git', '.gitignore', '__pycache__', '*.pyc', '.DS_Store', 'environment.yml',\n 'fabfile.py', 'Makefile', '.idea', 'bower_components', 'node_modules',\n '.env.example', 'README.md', 'var'\n ), delete=True)", "def setup_rawpath(job, raw_path):\n\n logging.info(f\"Destination is {raw_path}\")\n if not os.path.exists(raw_path):\n try:\n os.makedirs(raw_path)\n except OSError:\n err = f\"Couldn't create the base file path: {raw_path}. Probably a permissions error\"\n logging.error(err)\n else:\n logging.info(f\"{raw_path} exists. 
Adding timestamp.\")\n raw_path = os.path.join(str(job.config.RAW_PATH), f\"{job.title}_{job.stage}\")\n logging.info(f\"raw_path is {raw_path}\")\n try:\n os.makedirs(raw_path)\n except OSError:\n err = f\"Couldn't create the base file path: {raw_path}. Probably a permissions error\"\n raise OSError(err) from OSError\n return raw_path", "def processVideo(inputRootFolder='/home/pi/Logging/UnprocessedVideo',\n outputFolder='/home/pi/Logging/Unsent',\n cam_framerate=10,\n delay=15):\n logging.info('Processing videos')\n sleep(delay) # hardcoded sleep function to ensure that the video has finished saving\n # Create directories if necessary\n try:\n os.makedirs(inputRootFolder)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise # This was not a \"directory exists\" error\n try:\n os.makedirs(outputFolder)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise # This was not a \"directory exists\" error\n # Get the list of subdirectories\n f = []\n for (dirpath, dirnames, filenames) in os.walk(inputRootFolder):\n f.extend(dirnames)\n # Go through each subdirectory\n for folder in f:\n folderName = os.path.join(inputRootFolder,folder)\n videoListName = '%s/videoList.txt' % folderName #file that will contain list of videos\n videoList = io.open(videoListName, 'w')\n for fileName in sorted(os.listdir(folderName)): #add each video in the folder to the file\n if (fileName.startswith('Video')):\n videoString = (\"file '%s/%s'\\n\" % (folderName, fileName))\n videoList.write(videoString)\n videoList.close()\n outputFile = '%s/%s.mp4' % (outputFolder, folder)\n #concatenate the videos\n subprocess.call(['ffmpeg', '-y', '-f', 'concat', '-safe', '0', '-i',\n videoListName, '-c', 'copy', outputFile], shell=False)\n shutil.rmtree(folderName, ignore_errors=True) #delete the folder\n logging.info('Processed videos')", "def _videoclipupload(request, hash_key):\n # get video\n video_queryset = Video.objects.all().select_related('owner')\n video = get_object_or_404(video_queryset, hash_key=hash_key)\n \n if request.method == 'POST':\n \n # create a form instance and populate it with data from the request\n form = UploadClipForm(request.POST, request.FILES)\n \n # check whether it's valid\n if form.is_valid():\n # process the data in form.cleaned_data as required\n number = form.cleaned_data['formatted_number'].as_e164\n name = form.cleaned_data['name']\n clip = form.cleaned_data['clip']\n \n # get location of script\n script_dir = os.path.dirname(os.path.abspath(__file__))\n script_path = os.path.join(script_dir, \"process.sh\")\n \n # get locations of input and output files\n input_video = clip.temporary_file_path()\n output_video = NamedTemporaryFile(suffix='.mp4')\n output_image = NamedTemporaryFile(suffix='.jpg')\n output_duration = NamedTemporaryFile()\n \n # get the ffmpeg path\n # on production we had to install the 64 bit static ffmpeg build\n ffmpeg_path = ''\n if not settings.DEBUG:\n ffmpeg_path = '/home/nceruchalu/bin/'\n \n # Generate the output files\n subprocess.check_call(\n [script_path, input_video, output_video.name, \n output_image.name, output_duration.name, ffmpeg_path])\n \n # Get clip duration from the results\n clip_duration = map(float, output_duration)[0]\n \n # if we've come this far then all is well, and we can go ahead\n # and create this clip\n user = User.objects.get_user_by_number(number)\n if not user.is_active and name:\n user.full_name = name\n user.save()\n \n # first add user as a video user, because nobody should\n # add to a clip without being a video 
user\n VideoUsers.objects.add_users_to_video(video, user)\n \n # Try creating the clip with a thumbnail, if that fails try\n # without the thumbnail\n Clip.objects.create(owner=user,\n video=video,\n mp4=File(output_video),\n photo=ImageFile(output_image),\n duration=clip_duration)\n \n # close and delete the tempoary files\n output_video.close()\n output_image.close()\n output_duration.close()\n \n # redirect to the video details URL but add a querystring param\n # indicating that the video should start from the last clip\n video_url = reverse('web-video-detail',kwargs={'hash_key':hash_key})\n redirect_url = \"{video_url}?latest=1\".format(video_url=video_url)\n \n return HttpResponseRedirect(redirect_url)\n \n else:\n # if a GET (or any other method) we'll create a blank form\n form = UploadClipForm()\n \n return render_to_response('video/upload.html',\n {'video':video,\n 'form':form},\n context_instance=RequestContext(request))", "def ffmpeg_subclip_video_file(filename, t1, t2):\n subprocess.call(['ffmpeg', '-i', filename, '-ss', str(t1), '-to', str(t2), '-c', 'copy', '-y', filename.split('.')[0] + '_subclip.mp4'])\n return" ]
[ "0.5893241", "0.57616824", "0.5529838", "0.55258685", "0.55187756", "0.55067265", "0.5499056", "0.54705536", "0.5458571", "0.5449385", "0.54415053", "0.53093326", "0.5306078", "0.53042513", "0.5287746", "0.5274733", "0.5265946", "0.5246467", "0.52405924", "0.5228054", "0.5227246", "0.5223553", "0.5209518", "0.519676", "0.51964355", "0.5181166", "0.51608443", "0.51529205", "0.5132727", "0.5124742", "0.5107104", "0.50845337", "0.5082515", "0.5072183", "0.5070495", "0.50600946", "0.50590765", "0.50343186", "0.50277627", "0.50061774", "0.4998831", "0.4998305", "0.49927324", "0.49895415", "0.49807692", "0.4974426", "0.49729759", "0.49677858", "0.49517065", "0.49508423", "0.49467623", "0.49452364", "0.49408785", "0.4939306", "0.49368545", "0.4921772", "0.49135807", "0.4910351", "0.4909662", "0.49039823", "0.49015692", "0.48944283", "0.4883725", "0.4881896", "0.48783296", "0.48766387", "0.4868423", "0.48574197", "0.4846574", "0.4846177", "0.48435035", "0.48393098", "0.48357633", "0.48330018", "0.483269", "0.48303634", "0.48266", "0.4822527", "0.48196164", "0.48173836", "0.48142824", "0.4808503", "0.48064017", "0.48062852", "0.48038638", "0.47889832", "0.4785968", "0.47830164", "0.47810307", "0.47772652", "0.4774487", "0.47733688", "0.47721803", "0.47697154", "0.47684667", "0.47670767", "0.47661275", "0.47639725", "0.4760354", "0.4756789" ]
0.57895535
1
Read command line argument runtype. Throws error if the choice is not correct
def parseArg(): try: parser = argparse.ArgumentParser( description='This is the Scheduler export utility') parser.add_argument('--weekofmonth', action='store', dest='weekofmonth', required=True, help='select the week of month[1-5]', type = int, choices=[1,2,3,4,5]) parser.add_argument('--dayofweek', action='store', dest='dayofweek', help='select day of week [1-7]', type=int, choices=[1,2,3,4,5,6,7]) args = parser.parse_args() global WEEK global DAY WEEK = args.weekofmonth DAY = args.dayofweek log.info("1;EME;RUNNING;000;Scheduler.py;;;;;STARTING " + os.path.basename(__file__)) schedule(WEEK, DAY) except Exception as e: log.exception("1;EME;FAILURE;700;STARTUP ERROR " + str(e), exc_info=False) sys.exit(0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readArgs():\n args = sys.argv\n if len(args) != 3:\n print(\"ERROR - Wrong number of arguments! \\n\")\n print(\"Usage: plotGantt.py TYPE path/to/result/file.gantt \\n where TYPE is : MTS / SCH\")\n exit(5)\n if args[1] != \"MTS\" and args[1] != \"SCH\":\n print(\"ERROR - Wrong type specified! : \" + args[1])\n print(\"Usage: plotGantt.py TYPE path/to/result/file.gantt \\n where TYPE is : MTS / SCH\")\n return args", "def cmd_type(args):", "def read_cmd_args():\n\n if len(sys.argv) != 9:\n print(\"[ERR] Invalid number of command line arguments!\")\n usage()\n sys.exit(1)\n\n # FCST_SYR\n try:\n fcst_syr = int(sys.argv[1])\n except ValueError:\n print(f\"[ERR] Invalid argument for FCST_SYR! Received {(sys.argv[1])}\")\n usage()\n sys.exit(1)\n if fcst_syr < 0:\n print(f\"[ERR] Invalid argument for FCST_SYR! Received {(sys.argv[1])}\")\n usage()\n sys.exit(1)\n\n # FCST_EYR\n try:\n fcst_eyr = int(sys.argv[2])\n except ValueError:\n print(f\"[ERR] Invalid argument for FCST_EYR! Received {(sys.argv[2])}\")\n usage()\n sys.exit(1)\n if fcst_eyr < 0:\n print(f\"[ERR] Invalid argument for FCST_EYR! Received {(sys.argv[2])}\")\n usage()\n sys.exit(1)\n\n # MONTH_ABBR\n month_abbr = str(sys.argv[3])\n\n # MONTH_NUM\n try:\n month_num = int(sys.argv[4])\n except ValueError:\n print(f\"[ERR] Invalid argument for MONTH_NUM! Received {(sys.argv[4])}\")\n usage()\n sys.exit(1)\n if month_num < 1:\n print(f\"[ERR] Invalid argument for MONTH_NUM! Received {(sys.argv[4])}\")\n usage()\n sys.exit(1)\n if month_num > 12:\n print(f\"[ERR] Invalid argument for MONTH_NUM! Received {(sys.argv[4])}\")\n usage()\n sys.exit(1)\n\n # FCST_TYPE\n fcst_type = str(sys.argv[5])\n\n # LEAD_MONTHS\n try:\n lead_months = int(sys.argv[6])\n except ValueError:\n print(f\"[ERR] Invalid argument for LEAD_MONTHS! Received {(sys.argv[6])}\")\n usage()\n sys.exit(1)\n if lead_months < 0:\n print(f\"[ERR] Invalid argument for LEAD_MONTHS! Received {(sys.argv[6])}\")\n usage()\n sys.exit(1)\n\n # ENS_NUM\n try:\n ens_num = int(sys.argv[7])\n except ValueError:\n print(f\"[ERR] Invalid argument for ENS_NUM! Received {(sys.argv[7])}\")\n usage()\n sys.exit(1)\n if ens_num < 0:\n print(f\"[ERR] Invalid argument for ENS_NUM! 
Received {(sys.argv[7])}\")\n usage()\n sys.exit(1)\n\n # CONFIG_FILE\n config_file = sys.argv[8]\n if not os.path.exists(config_file):\n print(f\"[ERR] {config_file} does not exist!\")\n sys.exit(1)\n\n return fcst_syr, fcst_eyr, month_abbr, month_num, fcst_type, lead_months, \\\n \tens_num, config_file", "def readCommandLine(self, **kwargs):\n try:\n condition = kwargs[\"fname\"].split(':')[0]\n except Exception,e:\n rospy.logerr(\"%s\"%str(e))\n self.mm.neglect()\n return\n\n # experimentation parameters\n answer = raw_input('Change %s: %s to: ' % (condition,self.exp_predicates[condition]))\n self.exp_predicates[condition] = answer\n self.mm.loadMenu(\"teachMenu\")", "def test_type_arg(self, parse_input):\n with pytest.warns(SyntaxWarning, match=\"Only keyword options of the form\"):\n parse_input(\"name testname\\nversion 1.0\\ntarget example (6)\\ntype example (42)\")", "def parse_command_line():\r\n\r\n parser = argparse.ArgumentParser(description='User args')\r\n parser.add_argument(\"--action\", choices=['train', 'predict', 'demo', 'test'], required=True, help=\"Choose action.\")\r\n parser.add_argument(\"--model\", choices=['vgg', 'unet', 'fpn'], required=True, help=\"Choose model.\")\r\n parser.add_argument(\"--dataset\", choices=['full', 'small'], required=True, help=\"Choose dataset.\")\r\n\r\n return parser.parse_args()", "def read_user_input(self):\n\n self.commandline = raw_input(\"Enter the string you want to parse\\n\")", "def _read_cmd_args():\n\n # Check if argument count is correct.\n if len(sys.argv) != 5:\n print(\"[ERR] Invalid number of command line arguments!\")\n _usage()\n sys.exit(1)\n\n # Get path to config file\n configfile = sys.argv[1]\n if not os.path.exists(configfile):\n print(f\"[ERR] Config file {configfile} does not exist!\")\n sys.exit(1)\n\n # Get top directory of LIS data\n topdatadir = sys.argv[2]\n if not os.path.exists(topdatadir):\n print(f\"[ERR] LIS data directory {topdatadir} does not exist!\")\n sys.exit(1)\n\n # Get valid year and month\n yyyymm = sys.argv[3]\n if len(yyyymm) != 6:\n print(\"[ERR] Invalid length of YYYYMM, must be 6 characters!\")\n sys.exit(1)\n year = int(yyyymm[0:4])\n month = int(yyyymm[4:6])\n try:\n startdate = datetime.datetime(year, month, day=1)\n except ValueError:\n print(\"[ERR] Invalid YYYYMM passed to script!\")\n sys.exit(1)\n\n # Get model forcing ID\n model_forcing = sys.argv[4]\n\n return configfile, topdatadir, startdate, model_forcing", "def test_arg_type(args, arg, arg_type):\n try:\n arg_type(args[arg])\n except Exception:\n raise GaiaException('Required argument {} must be of type {}'\n .format(arg, arg_type))", "def _read_cmd_args():\n\n # Check if argument count is correct.\n if len(sys.argv) != 4:\n print(\"[ERR] Invalid number of command line arguments!\")\n print(len(sys.argv))\n print(sys.argv[:])\n _usage()\n sys.exit(1)\n\n # Check if lis.config template exists.\n lis_config_template = sys.argv[1]\n if not os.path.exists(lis_config_template):\n print(f\"[ERR] {lis_config_template} does not exist!\")\n sys.exit(1)\n\n # Check if directory for restart files exists. 
Actual restart file\n # shall be checked later.\n restart_dir = sys.argv[2]\n if not os.path.exists(restart_dir):\n print(f\"[ERR] Directory {restart_dir} does not exist!\")\n sys.exit(1)\n\n # Get start date of new LIS run.\n yyyymmdd = sys.argv[3]\n if len(yyyymmdd) != 8:\n print(\"[ERR] Invalid length for YYYYMMDD, must be 8 characters!\")\n sys.exit(1)\n year = int(yyyymmdd[0:4])\n month = int(yyyymmdd[4:6])\n day = int(yyyymmdd[6:8])\n try:\n startdate = datetime.date(year, month, day)\n except ValueError:\n print(\"[ERR] Invalid YYYYMMDD passed to script!\")\n sys.exit(1)\n\n return lis_config_template, restart_dir, startdate", "def parse_args(args):\n assert os.path.isfile(args.data_path), \"The specified data file does not exist.\"\n assert os.path.isfile(args.model_path), \"The specified model file does not exist.\"\n\n if args.read_batches is not False:\n if args.read_batches.lower() in (\"y\", \"yes\", \"1\", \"\", \"true\", \"t\"):\n args.read_batches = True\n else:\n args.read_batches = False", "def parse_and_validate_cmd_line():\n if len(sys.argv) != 4:\n print USAGE_STR.format(sys.argv[0])\n sys.exit()\n # attempt to parse the parameters tell the user and exit if we can't\n num_segments = parse_and_validate_num_segs(sys.argv[1])\n # try to parse numThreads\n num_threads = parse_and_validate_num_threads(sys.argv[2])\n # try to parse and test the data directory\n data_dir = parse_and_validate_data_dir(sys.argv[3])\n return num_segments, num_threads, data_dir", "def main(argv,required_arg,required_arg_type,optional_arg):\n \n # add optional_arguments to the parser\n for option in optional_arg:\n parse_option_dictionary[option]()\n \n # parse the command line\n passed_optional_arg, passed_required_arg = parser.parse_args(argv)\n \n required_arg_values = grabRequiredArgs(passed_required_arg,required_arg,\n required_arg_type)\n\n return required_arg_values, passed_optional_arg", "def validate_args(self, in_args, cmd_call):\n valid_1, valid_2 = None, None\n\n if len(in_args) > 0 and type(in_args) is not list:\n args = in_args.split()\n valid_1 = args[0]\n elif type(in_args) is list and len(in_args) > 0:\n args = in_args\n valid_1 = args[0]\n else:\n args = []\n\n if cmd_call in ['default']:\n # Default : Returns a valid cui type for an input cui\n # checks to see if there is more than 2 arguments\n # if so, arg[0] may be a valid code\n # arg[1] may be a valid code type\n # if not ask the user what type of code type arg[0] is\n # valid_1 = valid cui type\n # valid_2 = None\n while True:\n if len(args) >= 2 and len(args) <= 3:\n input_type = args[1].upper()\n else:\n input_type = input(\"What type of id is '{0}'? 
[LOCAL/RXCUI/NDC/SNOMED]\".format(args[0])).upper()\n\n # Confirm it's a valid code type\n valid_type = self.validate_id_type(input_type)\n # Valid type is a boolean of True\n if isinstance(valid_type, str) or valid_type is None:\n return None\n elif valid_type:\n break\n elif not valid_type:\n print('Invalid Option, Please Try Again')\n continue\n valid_1 = input_type\n\n elif cmd_call in self.cmd_config_default:\n # valid_1 : Valid Cui , valid_2 : Valid Cui Type\n valid_2, _ = self.validate_args(args, 'default')\n valid_1 = args[0]\n\n elif cmd_call == 'code_lookup':\n # args[0] : Initial CUI, args[1] : Initial CUI Type, args[2] : Target CUI Type\n # valid_1 : valid cui, valid_2 : list valid source and target\n _dict_opts = util.OPTIONS_CUI_TYPES.copy()\n _avail = list(set(smores.get_dict_sources()) & set(_dict_opts))\n if len(_avail) == 0 and len(args) < 2:\n print('There are no available starting cui types that can be crosswalked.\\n'\n 'Please load a file containing valid cui types: {0}'.format(_dict_opts))\n return False, None\n\n if len(args) >= 2:\n if len(args) == 3:\n # provided cui, cui source, and target\n valid_2, _ = self.validate_args(args, 'default')\n source, target = args[1].upper(), args[2].upper()\n else:\n source, target = args[0].upper(), args[1].upper()\n valid_1 = simple_input(\"Is {0} the correct starting source? \".format(source), ['YES', 'NO', 'exit'])\n if valid_1 == 'exit':\n return False, None\n # TODO need path for valid_2\n else:\n valid_1 = simple_input(\"Which code set do you want to start with?\", _avail)\n if valid_1 != 'exit':\n _dict_opts.remove(valid_1) # Don't lookup what we've already got\n valid_2 = simple_input(\"Which code set do you want to get results for?\", _dict_opts)\n if valid_2 == 'exit':\n return False, None\n else:\n return False, None\n\n elif cmd_call == 'errors':\n _current_err = list(self.errors.keys())\n if len(args) > 1:\n smores_error('#Cx001.7', console_p=True)\n return\n elif len(args) == 1 and args[0].lower() in _current_err:\n valid_1 = args[0]\n elif len(args) == 1:\n print('There are currently no errors logged for that command.')\n return\n else:\n valid_1 = simple_input(\"Please choose a command from the list to see errors: \", _current_err)\n\n elif cmd_call in ['csv', 'remap', 'fhir', 'json']:\n # Format: [File] [Output]\n if not self.inputs['loaded']:\n print(\"No Files Loaded!\\nYou Must load a file containing local medications first\")\n else:\n _file_opts = list(self.inputs['files'].keys()) + ['All']\n _dict_opts = list(smores.get_dict_sources(True)) #+ ['All']\n _file_or_dict = None\n\n if cmd_call in ['csv', 'json']:\n if len(args) == 0:\n _file_or_dict = simple_input(\"Do you want results for a File or a constructed Dictionary?\",\n ['File', 'Dictionary', 'exit'], True)\n elif args[0] not in _file_opts and args[0] not in _dict_opts:\n print('That option was not recognized as a valid source.')\n _file_or_dict = simple_input(\"Do you want results for a File or a constructed Dictionary?\",\n ['File', 'Dictionary', 'exit'], True)\n else:\n valid_1 = args[0]\n\n if _file_or_dict.upper() == 'FILE':\n valid_1 = 'FILE|' + simple_input(\"Please choose a loaded file\", _file_opts, True)\n\n elif _file_or_dict.upper() == 'DICTIONARY':\n valid_1 = 'DICT|' + simple_input(\"Please choose a code dictionary to output\", _dict_opts, True)\n elif _file_or_dict.upper() == 'EXIT':\n return None, None\n\n else:\n valid_1 = simple_input(\"Please choose a loaded file\", _file_opts, True)\n\n if cmd_call in ['csv', 'json', 'fhir']:\n if 
len(args) == 2 and len(args[1]) > 0:\n valid_2 = args[1]\n else:\n valid_2= input(\"Please provide an output file name:\").strip()\n\n if len(valid_2) > 0:\n if \".\" in valid_2:\n valid_2, ext = valid_2.split(\".\")\n else:\n valid_2 = ''\n print('Empty file name provided, using default.')\n else:\n valid_2 = args[0]\n\n elif cmd_call == 'file':\n re_use = False\n if self.inputs['loaded'] and len(in_args) == 0:\n print(\"The following file(s) have already been loaded: \\n\" + str(self.inputs['files']))\n _load_more = simple_input(\"Would you like to load an additional file?\", ['Y', 'N', 'exit'])\n if _load_more == 'Y':\n pass\n elif _load_more == 'N':\n _re_use = simple_input(\"Would you like to re-use a loaded file?\", ['Y', 'N', 'exit'])\n if _re_use == 'Y':\n re_use = True\n else:\n return False, None\n else:\n return False, None\n\n if in_args is not None and len(in_args) > 0:\n valid_1 = in_args\n else:\n valid_1 = input(\"Please enter the name of the file to load: \") if not re_use else simple_input(\n 'Select the file to be used: ', list(self.inputs['files'].keys()), index=True)\n\n while True:\n if valid_1 in self.inputs['files']:\n if not re_use:\n print(\"It looks like you've already loaded that file. Please try a different file.\")\n valid_1, valid_2 = input(\"Please enter the name of the file to load: \")\n else:\n break\n elif len(valid_1) == 0:\n smores_error('#Cx001.7', logger=smoresLog)\n valid_1, valid_2 = input(\"Please enter the name of the file to load: \")\n else:\n break\n\n if not resolve_target_path(valid_1):\n valid_1, valid_2 = self.validate_args('', 'file')\n\n elif '.smr' in valid_1:\n if len(self.inputs['files']) > 0:\n print(\n 'It looks like you are trying to load a session, this will replace the current session and '\n 'all previous work.')\n _save = simple_input('Do you want to save the current session first?', ['Y', 'N', 'EXIT'])\n if _save == 'Y':\n smores.save_session(self.__version__)\n elif _save == 'EXIT':\n return False, None\n valid_2 = 'session'\n else:\n valid_2 = 'file'\n\n smoresLog.debug('Args: {0}, Validated as: {1}'.format(valid_1, valid_2))\n return valid_1, valid_2", "def processCmdLineArgs(expectedTypes, usage):\n\targs = []\n\tnumComLineArgs = len(sys.argv)\n\tnumExpected = len(expectedTypes)\n\tif (numComLineArgs - 1 == len(expectedTypes)):\n\t\ttry:\n\t\t\tfor i in range(0, numExpected):\n\t\t\t\tif (expectedTypes[i] == typeInt):\n\t\t\t\t\targs.append(int(sys.argv[i+1]))\n\t\t\t\telif (expectedTypes[i] == typeFloat):\n\t\t\t\t\targs.append(float(sys.argv[i+1]))\n\t\t\t\telif (expectedTypes[i] == typeString):\n\t\t\t\t\targs.append(sys.argv[i+1])\n\t\texcept ValueError:\n\t\t\tprint (\"expected number of command line arguments found but there is type mis match\")\n\t\t\tsys.exit(1)\n\telse:\n\t\tprint (\"expected number of command line arguments not found\")\n\t\tprint (usage)\n\t\tsys.exit(1)\n\treturn args", "def test_type_kwarg(self, parse_input):\n bb = parse_input(\"name testname\\nversion 1.0\\ntarget example (6)\\ntype example (copies=1000)\")\n assert bb.programtype[\"options\"] == {\"copies\": 1000}", "def test_atleast_two_arguments_needed_one():\n cli_result = subprocess.run(\n ['kaiba', 'config.js'],\n capture_output=True,\n )\n assert b'the following arguments are required: input' in cli_result.stderr\n assert cli_result.returncode == 2", "def test_parser_init_with_valid_project_type(parser):\n parser.parse_args(['--init', 'java'])", "def main():\n\targuments_sent = sys.argv\n\tif len(arguments_sent) > 1:\n\t\tfile_path = 
arguments_sent[1]\n\t\tprocess_based_on_type(file_path)", "def get_type_check(self, arg, option):\n pass", "def main():\r\n args = sys.argv\r\n print 'Script:', args[0]\r\n args.pop(0)\r\n for i, argument in enumerate(sys.argv):\r\n print 'Argument {}: {}'.format(i, argument)\r\n print 'Type: {}'.format(type(argument))", "def test_from_command_line():\n config_file = t_path(\n Path('steps') / 'jwst_generic_pars-makeliststep_0001.asdf'\n )\n args = [config_file]\n step = Step.from_cmdline(args)\n assert isinstance(step, MakeListStep)\n assert step.par1 == 42.0\n assert step.par2 == 'Yes, a string'\n\n results = step.run()\n assert results == DEFAULT_RESULT", "def test_from_command_line_override():\n config_file = t_path(\n Path('steps') / 'jwst_generic_pars-makeliststep_0001.asdf'\n )\n args = [config_file, '--par1=0.']\n step = Step.from_cmdline(args)\n assert isinstance(step, MakeListStep)\n assert step.par1 == 0.\n assert step.par2 == 'Yes, a string'\n\n results = step.run()\n assert results == [0., DEFAULT_PAR2, False]", "def read_command( argv ):\n from optparse import OptionParser\n usageStr = \"\"\"\n USAGE: python tortoise.py <options>\n EXAMPLES: python tortoise.py --agent ReflexBrain\n OR python tortoise.py -a ReflexBrain\n - run tortoise with the reflex agent\n \"\"\"\n parser = OptionParser(usageStr)\n \n parser.add_option('-a', '--agent', dest = 'agent',\n help = default('The agent to use'),\n metavar = 'TYPE', default = 'ReflexBrain')\n parser.add_option('-w', '--width', dest = 'width',\n help = default('World width'), default = 15)\n parser.add_option('-s', '--speed', dest = 'speed',\n help = default('Speed'), default = 40)\n parser.add_option('-r', '--random-seed', dest = 'random_seed',\n help = default('Random'), default = -1)\n \n options, otherjunk = parser.parse_args(argv)\n\n if len(otherjunk) != 0:\n raise Exception('Command line input not understood: ' + str(otherjunk))\n args = dict()\n \n # Choose a Tortoise solver\n try:\n module = __import__('agents')\n if options.agent in dir(module):\n agent = getattr(module, options.agent)\n args['agent'] = agent()\n else:\n raise Exception('Unknown agent: ' + options.agent)\n except ImportError:\n raise Exception('No file agents.py')\n \n args['width'] = int(options.width)\n args['speed'] = int(options.speed)\n args['random_seed'] = int(options.random_seed)\n return args", "def read(self, sys):\n\n # if no options are set, print help\n if len(sys.argv) == 1:\n sys.argv.append('-h')\n\n # make sure parameter file is processed first\n # so that all options on the command line\n # have precedence\n if any(['@' in xx[0] for xx in sys.argv]):\n paridx = [xx[0] for xx in sys.argv].index('@')\n parfile = sys.argv.pop(paridx)\n sys.argv.insert(1, parfile)\n\n opt = self.parser.parse_args()\n\n # transform some parameters to proper types\n try:\n if 1 != opt.beamscaling:\n opt.beamscaling = [float(xx)\n for xx\n in opt.beamscaling.split(',')]\n\n if opt.feed:\n opt.feed = self.parse_range(opt.feed)\n\n if opt.pol:\n opt.pol = self.parse_range(opt.pol)\n\n if opt.window:\n opt.window = self.parse_range(opt.window)\n\n if opt.mapscans:\n opt.mapscans = self.parse_range(opt.mapscans)\n\n if opt.refscans:\n opt.refscans = self.parse_range(opt.refscans)\n\n except ValueError:\n print('ERROR: there is a malformed parameter option')\n print(' please check your command line settings and try again.')\n sys.exit()\n\n opt.units = opt.units.lower()\n\n return opt", "def validate_argtype(arg, argtype):\r\n if not isinstance(arg, argtype):\r\n 
raise HelperException('{0} argument must be of type {1}'.format(\r\n arg, argtype))\r\n return arg", "def read_cmd(self):\n\n parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)\n req_opts = parser.add_argument_group(\"Required Options\")\n req_opts.add_argument(\"--instance_dir\", required=True,\n help=\"directory with instances (not recursive\")\n \n opt_opts = parser.add_argument_group(\"Optional Options\")\n \n opt_opts.add_argument(\"--fn_suffix\", default=\".*\",\n help=\"suffix of instance file names\")\n opt_opts.add_argument(\"--cutoff\", default=10, type=int,\n help=\"running time cutoff [sec]\")\n opt_opts.add_argument(\"--memlimit\", default=2048, type=int,\n help=\"memory limit\")\n opt_opts.add_argument(\"--ac_budget\", default=360,\n help=\"configuration budget [sec]\")\n opt_opts.add_argument(\"--run_obj\", default=\"runtime\",\n choices=[\"runtime\", \"quality\"],\n help=\"run objective\")\n opt_opts.add_argument(\"--par-factor\", default=10,\n help=\"Factor by which to penalize unsolved instances. Usage may differ based on TAE used.\")\n\n opt_opts.add_argument(\"--binary\", default=\"clingo\",\n help=\"target binary\")\n opt_opts.add_argument(\"--pcs_file\", default=\"pcs/all_params.pcs\",\n help=\"parameter configuration file\")\n opt_opts.add_argument(\"--runsolver\", default=\"binaries/runsolver\",\n help=\"runsolver binary\")\n opt_opts.add_argument(\"--tae_class\", default=None,\n help=\"TAE class to individualize clingo calls -- has to inherit from smac.tae.execute_ta_run_aclib.ExecuteTARunAClib\")\n\n\n opt_opts.add_argument(\"--seed\", default=12345, type=int,\n help=\"random seed\")\n opt_opts.add_argument(\"--verbose_level\", default=logging.INFO,\n choices=[\"INFO\", \"DEBUG\"],\n help=\"random seed\")\n opt_opts.add_argument(\"--tae_args\", default=\"{}\",\n help=\"Miscellaneous options for the TAE\")\n \n\n args_, misc = parser.parse_known_args()\n self._check_args(args_)\n args_.tae_args=json.loads(args_.tae_args)\n\n # remove leading '-' in option names\n misc = dict((k.lstrip(\"-\"), v.strip(\"'\"))\n for k, v in zip(misc[::2], misc[1::2]))\n\n misc[\"instances\"] = self._find_files(dir_=args_.instance_dir, suffix_=args_.fn_suffix)\n misc[\"wallclock_limit\"] = args_.ac_budget\n misc[\"cutoff_time\"] = args_.cutoff\n misc[\"paramfile\"] = args_.pcs_file\n misc[\"algo\"] = \"\"\n misc[\"run_obj\"] = args_.run_obj\n\n return args_, misc", "def errFunc(runType):\n logger.error('Execution type not recognized! 
{}'.format(runType))\n raise InvalidExecutionType('{} is not a valid command'.format(runType))", "def _validate_edit_command(args):\n res = _check_entry_name(args)\n if res != 0:\n return res\n\n # If no new type is specified on the command line then leave validation of\n # property arguments to _process_edit_command() when a type of the existing\n # entry is determined.\n if args.type is None:\n return 0\n\n return _check_property_arguments(args, args.type)", "def getopt():\n raise NotImplementedError()", "def getTRSLanguage():\n try:\n return sys.argv[1]\n except IndexError as error:\n print(\"No language argument\\n\")\n sys.exit()", "def check_argv():\n parser = argparse.ArgumentParser(description=__doc__.strip().split(\"\\n\")[0], add_help=False)\n parser.add_argument(\"segment_fn\", type=str, help=\"pickled segmentation file\")\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n return parser.parse_args()", "def parse_argument(argument_option):\n index = sys.argv.index(argument_option)\n try:\n argument = sys.argv[index+1]\n except IndexError:\n print('ERROR: Invalid argument!')\n print(__doc__)\n print(unittest.main.__doc__)\n else:\n sys.argv.pop(index)\n sys.argv.pop(index)\n return argument", "def check_args():\n schema = Schema({\n 'FOLDREC': Use(open, error='FOLDREC file should be readable'),\n 'CLUSTAL': Use(open, error='CLUSTAL file should be readable'),\n 'CCMPRED': Use(open, error='CCMPRED file should be readable'),\n '--metafold': Use(open, error='METAFOLD_FILE should be readable'),\n '--nb_pdb': And(Use(int), lambda n: 1 <= n <= 405,\n error='--nb_pdb=NUM should be integer 1 <= N <= 405'),\n '--dssp': Use(open, error='dssp/mkdssp should be readable'),\n '--dope': Use(open, error='dope file should be readable'),\n '--benchmark': Use(open, error='BENCHMARK_FILE should be readable'),\n '--cpu': And(Use(int), lambda n: 0 <= n <= cpu_count(),\n error='--cpus=NUM should be integer 1 <= N <= ' + str(cpu_count())),\n # The output PATH is created (if not exists) at the end of the program\n # so we skip the check.\n object: object})\n try:\n schema.validate(ARGUMENTS)\n except SchemaError as err:\n exit(err)", "def run(self, line):\r\n if os.name == 'nt':\r\n if not ctypes.windll.shell32.IsUserAnAdmin() != 0:\r\n self.app.typepath.adminpriv = False\r\n elif not os.getuid() == 0:\r\n self.app.typepath.adminpriv = False\r\n\r\n nargv = []\r\n curr = []\r\n argfound = False\r\n\r\n if \"--version\" in line or \"-V\" in line:\r\n sys.stdout.write(\"\"\"%(progname)s %(version)s\\n\"\"\" % \\\r\n {'progname': versioning.__longname__, 'version': \\\r\n versioning.__version__})\r\n sys.stdout.flush()\r\n sys.exit(self.retcode)\r\n\r\n else:\r\n for argument in enumerate(line):\r\n if not argfound and not argument[1].startswith('-'):\r\n nargv = line[argument[0]:]\r\n break\r\n else:\r\n argfound = False\r\n\r\n if argument[1] == \"-c\":\r\n argfound = True\r\n\r\n curr.append(argument[1])\r\n\r\n (self.opts, _) = self.parser.parse_args(curr)\r\n\r\n try:\r\n Encryption.encode_credentials('test')\r\n self.app.set_encode_funct(Encryption.encode_credentials)\r\n self.app.set_decode_funct(Encryption.decode_credentials)\r\n self.encoding = True\r\n except redfish.hpilo.risblobstore2.ChifDllMissingError:\r\n self.encoding = False\r\n\r\n if self.opts.config is not None and len(self.opts.config) > 0:\r\n if not os.path.isfile(self.opts.config):\r\n self.retcode = ReturnCodes.CONFIGURATION_FILE_ERROR\r\n sys.exit(self.retcode)\r\n\r\n self.app.config_file = self.opts.config\r\n\r\n 
self.app.config_from_file(self.app.config_file)\r\n if self.opts.logdir and self.opts.debug:\r\n logdir = self.opts.logdir\r\n else:\r\n logdir = self.app.config.get_logdir()\r\n\r\n if logdir and self.opts.debug:\r\n try:\r\n os.makedirs(logdir)\r\n except OSError as ex:\r\n if ex.errno == errno.EEXIST:\r\n pass\r\n else:\r\n raise\r\n\r\n if self.opts.debug:\r\n logfile = os.path.join(logdir, versioning.__shortname__+'.log')\r\n\r\n # Create a file logger since we got a logdir\r\n lfile = logging.FileHandler(filename=logfile)\r\n formatter = logging.Formatter(\"%(asctime)s %(levelname)s\\t: \" \\\r\n \"%(message)s\")\r\n\r\n lfile.setFormatter(formatter)\r\n lfile.setLevel(logging.DEBUG)\r\n LOGGER.addHandler(lfile)\r\n self.app.LOGGER = LOGGER\r\n\r\n cachedir = None\r\n if self.opts.nocache:\r\n self.app.config.set_cache(False)\r\n else:\r\n self.app.config.set_cachedir(os.path.join(self.opts.config_dir, \\\r\n 'cache'))\r\n cachedir = self.app.config.get_cachedir()\r\n\r\n if cachedir:\r\n try:\r\n os.makedirs(cachedir)\r\n except OSError as ex:\r\n if ex.errno == errno.EEXIST:\r\n pass\r\n else:\r\n raise\r\n\r\n if (\"login\" in line or any(x.startswith(\"--url\") for x in line) or not line)\\\r\n and not (any(x.startswith((\"-h\", \"--h\")) for x in nargv) or \"help\" in line):\r\n self.app.logout()\r\n else:\r\n self.app.restore()\r\n self.opts.is_redfish = self.app.updatedefinesflag(redfishflag=\\\r\n self.opts.is_redfish)\r\n\r\n if nargv:\r\n try:\r\n self.retcode = self._run_command(self.opts, nargv)\r\n if self.app.config.get_cache():\r\n if (\"logout\" not in line) and (\"--logout\" not in line):\r\n self.app.save()\r\n else:\r\n self.app.logout()\r\n except Exception as excp:\r\n self.handle_exceptions(excp)\r\n\r\n return self.retcode\r\n else:\r\n self.cmdloop(self.opts)\r\n\r\n if self.app.config.get_cache():\r\n self.app.save()\r\n else:\r\n self.app.logout()", "def test_parser_init_with_invalid_project_type(parser):\n with pytest.raises(SystemExit):\n parser.parse_args(['--init', 'error'])", "def validate_command_line_input(args):\n valid = False\n if 0 < len(args) <= 4:\n valid = True\n for arg in args:\n if int(arg) > 4:\n valid = False\n break\n else:\n pass\n if valid:\n CRUDStarter.load_operations(args)\n pass\n else:\n CRUDStarter.logger.info(\"Argument maximum acceptable value is 4\")\n else:\n CRUDStarter.logger.info(\"at least One at most Four argument(s) required\")", "def check_input_options(args):\n\n # Make sure the input file is trimmed for use later on in the program.\n args.input_file = args.input_file.strip()\n\n # Make sure the output_type string(s) is(are) trimmed and lowercase.\n args.output_type = [x.strip().lower() for x in args.output_type]\n\n # The DPI value must be greater than zero...\n if args.dpi_val <= 0.:\n raise ValueError(\"DPI value must be > 0.\")", "def check_input_options(args):\n\n # Make sure the input file is trimmed for use later on in the program.\n args.input_file = args.input_file.strip()\n\n # Make sure the output_type string(s) is(are) trimmed and lowercase.\n args.output_type = [x.strip().lower() for x in args.output_type]\n\n # The DPI value must be greater than zero...\n if args.dpi_val <= 0.:\n raise ValueError(\"DPI value must be > 0.\")", "def main(args):\n cli = CLI()\n # Check arguments\n cli.parse_arguments(args)", "def readCommand( argv ): ## argv belongs to the 'sys'-library and can be called through sys.argv. 
The function reads the console's comand line argument and passes it to a variable like so: args = sys.argv[1:]\n from optparse import OptionParser ## Option Parser is a powerful library for passing command line options (an advanced args) if you like. It allows you to add options by defining attributes. \n usageStr = \"\"\" \n USAGE: python pacman.py <options> \n EXAMPLES: (1) python pacman.py\n - starts an interactive game\n (2) python pacman.py --layout smallClassic --zoom 2\n OR python pacman.py -l smallClassic -z 2\n - starts an interactive game on a smaller board, zoomed in\n \"\"\" \n parser = OptionParser(usageStr) ## This creates the Option Parser instance. It also passes the usageStr which functions as a little help-text for the user.\n\n ### In this section all the option strings are defined. Typically each option has one short option string and one long option string. For example the parser.add_option('-n'... has '-n' as short and '--numGames' as the long option string. Both have the same effect. The option argument will be the same and be saved as the variabme 'numGames'. \n parser.add_option('-n', '--numGames', dest='numGames', type='int', \n help=default('the number of GAMES to play'), metavar='GAMES', default=1) ## the syntax for the options is (based on the example in this line) --n 3. This means that the value '3' would be assigned to the variable numGames.\n parser.add_option('-l', '--layout', dest='layout',\n help=default('the LAYOUT_FILE from which to load the map layout'), #The instance -> 'options.layout' defines the layout_file from which to load the map layout; DEFAULT = medium_classic\n metavar='LAYOUT_FILE', default='mediumClassic')\n parser.add_option('-p', '--pacman', dest='pacman',\n help=default('the agent TYPE in the pacmanAgents module to use'), #The instance -> 'options.pacman' defines which of the agent TYPE in the pacmanAgents moduleto use.\n metavar='TYPE', default='KeyboardAgent')\n parser.add_option('-t', '--textGraphics', action='store_true', dest='textGraphics',\n help='Display output as text only', default=False)\n parser.add_option('-q', '--quietTextGraphics', action='store_true', dest='quietGraphics',\n help='Generate minimal output and no graphics', default=False)\n parser.add_option('-g', '--ghosts', dest='ghost',\n help=default('the ghost agent TYPE in the ghostAgents module to use'),\n metavar = 'TYPE', default='RandomGhost')\n parser.add_option('-k', '--numghosts', type='int', dest='numGhosts',\n help=default('The maximum number of ghosts to use'), default=4)\n parser.add_option('-z', '--zoom', type='float', dest='zoom',\n help=default('Zoom the size of the graphics window'), default=1.0)\n parser.add_option('-f', '--fixRandomSeed', action='store_true', dest='fixRandomSeed',\n help='Fixes the random seed to always play the same game', default=False)\n parser.add_option('-r', '--recordActions', action='store_true', dest='record',\n help='Writes game histories to a file (named by the time they were played)', default=False)\n parser.add_option('--replay', dest='gameToReplay',\n help='A recorded game file (pickle) to replay', default=None)\n parser.add_option('-a','--agentArgs',dest='agentArgs',\n help='Comma separated values sent to agent. e.g. 
\"opt1=val1,opt2,opt3=val3\"')\n parser.add_option('-x', '--numTraining', dest='numTraining', type='int',\n help=default('How many episodes are training (suppresses output)'), default=0)\n parser.add_option('--frameTime', dest='frameTime', type='float',\n help=default('Time to delay between frames; <0 means keyboard'), default=0.1)\n parser.add_option('-c', '--catchExceptions', action='store_true', dest='catchExceptions',\n help='Turns on exception handling and timeouts during games', default=False)\n parser.add_option('--timeout', dest='timeout', type='int',\n help=default('Maximum length of time an agent can spend computing in a single game'), default=30)\n\n #ONCE ALL THE OPTIONS HAVE BEEN DEFINED, optparse is instructed to parse the programm's command line.\n ##> The parser.parse_args() returns two values:\n ### (A) OPTIONS: An object containing values for all of your options e.g.:e.g. if --file takes a single string argument, then options.file will be the filename supplied by the user, or None if the user did not supply that option\n ### (B) ARGS: The list of positional arguments leftover after parsing options (we call this here otherjunk)\n options, otherjunk = parser.parse_args(argv) ## if the user happens to accidentally enter a command other than the specified arguments specified by parser.add_option it is passed to otherjunk\n if len(otherjunk) != 0: ## if there actually ends up to be a value in the otherjunk the program raises an Exception.\n raise Exception('Command line input not understood: ' + str(otherjunk))\n args = dict() # ARGS IS THE VARIABLE THAT IS BEING RETURNED BY THE readCommand function.\n\n # Fix the random seed\n if options.fixRandomSeed: random.seed('cs188') # 'random.seed' is part of the random class. The random.seed([x]) command initialises a standard random number. Optional argument x can be any hashable object. \n\n # Choose a layout\n args['layout'] = layout.getLayout( options.layout ) # REF_LAYOUT111: layout.py --> This function returns the layout object that was created by the layout class via the getlayout function. This contains the height, width, walls, food, captules and agent positions etc.\n if args['layout'] == None: raise Exception(\"The layout \" + options.layout + \" cannot be found\")\n\n # Choose a Pacman agent\n noKeyboard = options.gameToReplay == None and (options.textGraphics or options.quietGraphics) ## noKeyboard is set to TRUE if the user chooses the --replay and text- or silent graphics option.\n ##print noKeyboard\n pacmanType = loadAgent(options.pacman, noKeyboard) ## [see REFERENCE_001]: the loadAgent function takes the pacman argument the user passed into the command line as the option--pacman option identifies the appropriate agent (which may be the programmed agent or whost agent). \n agentOpts = parseAgentArgs(options.agentArgs) ##Passes the option.agentArgs which was captured by the user's console input into the agentOps variable. agentArgs is: \"Comma separated values sent to agent. e.g. \"opt1=val1,opt2,opt3=val3. The ParseAgentArgs function converts the option - value pairings into a dictionary formatted opts[opt1] = val1. 
\n if options.numTraining > 0: ##numTraining was captured by the user's console input and designates how many games are training games which means that the output remains surpressed.\n args['numTraining'] = options.numTraining ## This takes the user's input as the -x or --numTraining and passes it to the args dictionary with the numTraining key as the args['numTraining'] variable.\n if 'numTraining' not in agentOpts: agentOpts['numTraining'] = options.numTraining ## This integrates the variable entered into as training rounds in the agentOpts variable.\n pacman = pacmanType(**agentOpts) ## REFERENCE002 ##Instantiate Pacman with agentOpts. ## The variable pacmanType contains a reference to agent module loaded by the load Agent function. This function does not cause the module to be instanciated. This happens when here ## See[REFERENCE_001]: ## The * and ** will 'soak up' any remaining values not otherwise accounted for. In this case these options are basically the agent options the user can input.\n ## agentOpts contains the opts dictionary = {opt1:val1, opt2:val2, opt3:val3}; it also contains the numTraining variable as the ['numTraining'] key. As such it has the following structure. {opt1:val1,opt2:val2,opt3:val3, numTraining:int}.\n args['pacman'] = pacman ## This passes the instanciated object to the agent dictionary containing the pacman key.\n\n # Don't display training games\n if 'numTrain' in agentOpts: ## Checks whether the user has determined a certain number of training games. If they did, the number is passed on as an int to the options.numQuiet and option.numIgnore variables.\n options.numQuiet = int(agentOpts['numTrain']) \n options.numIgnore = int(agentOpts['numTrain'])\n\n # Choose a ghost agent\n ghostType = loadAgent(options.ghost, noKeyboard) ## The options.ghost variable contains the user's ghost type preference as specified in the console.The user can choose between -g RandomGhost which is A ghost that chooses a legal action uniformly at random OR DirectionalGhost, a ghost that prefers to rush Pacman, or flee when scared.\n args['ghosts'] = [ghostType( i+1 ) for i in range( options.numGhosts )] #instanciates as many ghost agents as the player requested by entering the desired number as -k', '--numghosts'in the console.\n\n # Choose a display format ##contains whether the game output is displayed as minimal output and no graphics (-q) text only (-t) or via graphicsDiplay (standard)\n if options.quietGraphics: \n import textDisplay\n args['display'] = textDisplay.NullGraphics()\n elif options.textGraphics:\n import textDisplay\n textDisplay.SLEEP_TIME = options.frameTime\n args['display'] = textDisplay.PacmanGraphics()\n else:\n import graphicsDisplay ## This refers to the module that is responsible for the graphical representation of the game.\n args['display'] = graphicsDisplay.PacmanGraphics(options.zoom, frameTime = options.frameTime) ## This line instanciates the PacmanGraphics class from the graphicsDisplay module and passes the reference to the args['display'] dictionary.\n args['numGames'] = options.numGames \n args['record'] = options.record\n args['catchExceptions'] = options.catchExceptions\n args['timeout'] = options.timeout\n\n # Special case: recorded games don't use the runGames method or args structure\n if options.gameToReplay != None:\n print 'Replaying recorded game %s.' 
% options.gameToReplay \n import cPickle\n f = open(options.gameToReplay)\n try: recorded = cPickle.load(f)\n finally: f.close()\n recorded['display'] = args['display']\n replayGame(**recorded)\n sys.exit(0)\n\n return args #returns the args-dictionary which contains:\n ##args['pacman'] which contains a dictionary of dictionaries of the agent that was loaded into args['numtraining'] = {agentOpts[opt1]: val1 ; agentOpts[opt2]:val2; agentOpts[opt3]:val3}\n ##args['layout'] - this function returns the layout object that was created by the layout class via the getlayout function.\n ##args['numTraining'] which contains which designates how many games are training games which means that the output remains surpressed\n ##args['ghosts'] - contains the instanciated ghost agents in line with the number the user specified\n ##args['display'] - contains whether the game output is displayed as minimal output and no graphics (-q) text only (-t) or via graphicsDiplay (standard)\n ##args['numGames'] - the number of GAMES to play\n ##args['record'] - Writes game histories to a file (named by the time they were played)\n ##args['catchExceptions'] = options.catchExceptions - Turns on exception handling and timeouts during games\n ##args['timeout'] = options.timeout -Maximum length of time an agent can spend computing in a single game", "def configure_commandline(cmdline_arguments: argparse.Namespace) -> Optional[Text]:", "def main():\n\tn = len(sys.argv)\n\tif n == 1:\n\t main_menu()\n\telif n > 3:\n\t print(\"Invalid number of args\")\n\telse:\n\t features = {\n\t \"add\": add,\n\t \"help\": main_menu,\n\t \"ls\": getTasks,\n\t \"done\": markOff,\n\t \"report\": getReport,\n\t \"del\": deleteTask}\n\t choosedFeature = features.get(sys.argv[1],\"Invalid Argument\")\n\t if choosedFeature == \"Invalid Argument\":\n\t \tprint(choosedFeature)\n\t else:\n\t \tchoosedFeature()", "def parse_arguments(args):", "def test_invalid_arguments(self):\n # More than two arguments should report an error.\n exit_code, output = run_cli('a', 'b', 'c')\n assert exit_code != 0\n assert \"Error\" in output\n # Invalid `ionice' values should report an error.\n exit_code, output = run_cli('--ionice=foo')\n assert exit_code != 0\n assert \"Error\" in output", "def main():\n opt = parse_opts()\n run(opt)", "def main():\n opt = parse_opts()\n run(opt)", "def check_input(args):\n\n # Defaults\n option = ''\n fh = sys.stdin # file handle\n\n if not len(args):\n # Reading from pipe with default option\n if sys.stdin.isatty():\n sys.stderr.write(__doc__)\n sys.exit(1)\n\n elif len(args) == 1:\n # One of two options: option & Pipe OR file & default option\n if args[0].startswith('-'):\n option = args[0][1:]\n if sys.stdin.isatty(): # ensure the PDB data is streamed in\n emsg = 'ERROR!! No data to process!\\n'\n sys.stderr.write(emsg)\n sys.stderr.write(__doc__)\n sys.exit(1)\n\n else:\n if not os.path.isfile(args[0]):\n emsg = 'ERROR!! File not found or not readable: \\'{}\\'\\n'\n sys.stderr.write(emsg.format(args[0]))\n sys.stderr.write(__doc__)\n sys.exit(1)\n\n fh = open(args[0], 'r')\n\n elif len(args) == 2:\n # Two options: option & File\n if not args[0].startswith('-'):\n emsg = 'ERROR! First argument is not an option: \\'{}\\'\\n'\n sys.stderr.write(emsg.format(args[0]))\n sys.stderr.write(__doc__)\n sys.exit(1)\n\n if not os.path.isfile(args[1]):\n emsg = 'ERROR!! 
File not found or not readable: \\'{}\\'\\n'\n sys.stderr.write(emsg.format(args[1]))\n sys.stderr.write(__doc__)\n sys.exit(1)\n\n option = args[0][1:]\n fh = open(args[1], 'r')\n\n else: # Whatever ...\n sys.stderr.write(__doc__)\n sys.exit(1)\n\n # Validate option\n if len(option) > 1:\n emsg = 'ERROR!! Alternate location identifiers must be single '\n emsg += 'characters: \\'{}\\''\n sys.stderr.write(emsg.format(option))\n sys.exit(1)\n\n return (option, fh)", "def get_file_read_arg(self):\n\ttry:\n\t arg = sys.argv[1]\n\t file_read = str(arg)\n\texcept IndexError:\n\t print \"Please provide the name of the file that you wish to receive.\"\n\t sys.exit(\"Example usage:\\n\\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0\") \n\tif (len(file_read) > 100):\n\t print \"Name of file must be equal to or less than 100 characters.\"\n\t sys.exit(\"Example usage:\\n\\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0\")\n\telse:\n\t return file_read", "def get_command_line_args(argv):\n # Initialize the arguments to their default values \n\n args = {'startdate': '20200101',\n 'enddate': '20200102',\n 'outfile': 'test.nc',\n 'dt': 5,\n 'real': True,\n 'south': False,\n 'tcv': False,\n 'substorm': False,\n 'ions': False,\n 'move': False,\n 'cusp': False}\n\n arg_type = {'startdate': str,\n 'enddate': str,\n 'outfile': str,\n 'dt': float,\n 'real': bool,\n 'south': bool,\n 'tcv': bool,\n 'substorm': bool,\n 'ions': bool,\n 'move': bool,\n 'cusp': bool}\n \n # If there is input, set default help to False\n args['help'] = False if len(argv) > 0 else True\n \n # Cycle through all arguments except the first, saving input\n for arg in argv:\n # Treat the file list and formatting seperately\n if arg.find('-') == 0:\n # This is not a filename, remove the dash to get the key\n split_arg = arg.split('=')\n akey = split_arg[0][1:]\n # Get the argument value as the desired type\n if akey not in arg_type.keys():\n raise ValueError(''.join(['unknown command line input, ',\n arg, ', try -help for details']))\n\n if len(split_arg) == 1:\n if arg_type[akey] == bool:\n arg_val = True\n else:\n raise ValueError('expected equality after flag {:}'.format(\n akey))\n else:\n if arg_type[akey] == int:\n arg_val = int(split_arg[1])\n elif arg_type[akey] == float:\n arg_val = float(split_arg[1])\n elif arg_type[akey] == str:\n arg_val = split_arg[1]\n else:\n # This is boolean input\n arg_val = bool_string(split_arg[1])\n\n args[akey] = arg_val\n \n return args", "def validate_arguments(self,args):\n\t\tif args.org == None:\n\t\t\tprint('Please specify Organization name. Exiting.')\n\t\t\tsys.exit(0)\n\t\tif args.repo == None:\n\t\t\tprint('Please specify Repositories name. Exiting.')\n\t\t\tsys.exit(0)\n\t\tif args.event_type == None:\n\t\t\tprint('Please specify type of the event. Exiting.')\n\t\t\tsys.exit(0)", "def get_op():\n if len(sys.argv) < 2:\n print(\"Error! 
You must enter either '-e' or '-d' as the first argument.\")\n return -1\n return sys.argv[1]", "def test_args(self):\n parser = argparse.ArgumentParser(\n prog=\"sysbottle\", description=\"sysbottle is parsed\"\n )\n subparsers = parser.add_subparsers()\n sysbottle.build(subparsers)\n args = parser.parse_args(\n [\n \"sysbottle\",\n \"abc.txt\",\n \"-c\",\n \"90\",\n \"-q\",\n \"1\",\n \"-d\",\n \"sda\",\n \"-i\",\n \"5\",\n \"-t\",\n \"3\",\n ]\n )\n self.assertTrue(hasattr(args, \"file\"))\n self.assertTrue(hasattr(args, \"cpu\"))\n self.assertTrue(hasattr(args, \"diskQ\"))\n self.assertTrue(hasattr(args, \"disks\"))\n self.assertTrue(hasattr(args, \"iowait\"))\n self.assertTrue(hasattr(args, \"throughput\"))", "def test_robo():\n print(\n \"Choose the option of input:\\n\"\n \"1. Enter the command string\\n\"\n \"2. Select from input file\"\n )\n option = input(\"Enter 1 or 2 to select the input method:\")\n # Wait till the right option is chosen.\n while not (option == \"1\" or option == \"2\"):\n print(\"Invalid option, please enter valid option\")\n option = input(\"Enter 1 or 2 to select the input method:\")\n\n if option == \"1\":\n command = input(\"Enter the command as string:\")\n # To format the cli command to proper string.\n command = command[1:-1]\n robo = calculate_distance.Robot()\n robo.main(command)\n\n elif option == \"2\":\n file = open(os.path.dirname(__file__) + \"/input.yml\")\n parsed_input_file = yaml.load(file, Loader=yaml.FullLoader)\n command_list = parsed_input_file.get(\"input_list\")\n # To iterate through the list of commands given.\n for item in command_list:\n robo = calculate_distance.Robot()\n robo.main(item)", "def get_mode():\n\tprint(\"Please choose the file type that you want to normalize ('band'/'phband'/'dos'): \", end='')\n\twhile True:\n\t\tmode = input()\n\t\tif mode == \"b\" or mode == \"band\" : return \"band\"\n\t\telif mode == \"pb\" or mode == \"phband\": return \"phband\"\n\t\telif mode == \"d\" or mode == \"dos\" : return \"dos\"\n\t\telif mode == \"phd\" or mode == \"phdos\" :\n\t\t\tprint(\"phdos-files don't need to be normalized; if you wanna change the unit, plz try other programs.\"); exit(1)\n\t\telse: print(\"Plz enter 'band'/'b', 'phband'/'pb', or 'dos'/'d': \", end=\"\")", "def main(args):", "def main(args):", "def read_type():\n\twhile True:\n\t\t_type = input(\"Introduceti tipul: \")\n\t\tif (is_in_list(_type, VALID_TYPES)):\n\t\t\treturn (_type)\n\t\telse:\n\t\t\tprint(\"Tipul este invalid.\")", "def test_filename_required():\n with pytest.raises(SystemExit):\n cli.parse_args(['-f'])", "def ReadArguments():\n\n args = ParseArguments()\n\n logging.info('Command line arguments...')\n for arg in vars(args):\n logging.info(str(arg) + ': ' + str(getattr(args, arg)))\n logging.info('')\n\n IsTest(args)\n ProcessCacheSize(args)\n ProcessLineSize(args)\n ProcessMulti(args)\n ProcessMemPattern(args)\n ProcessMemFile(args)", "def parameters_are_valid():\n # The only accepted number of command line arguments is 3: they are\n # aggregator.py, the filename, and the topic\n if len(sys.argv) != 3:\n # Issue error message if invalid number of command line arguments\n print(\"Error: invalid number of arguments\")\n print(\"Usage: aggregator.py filename topic\")\n return False\n else:\n return True", "def test_exit_on_wrong_type(self):\n with self.assertRaises(SystemExit):\n pyint = Interpreter()\n pyint.run(file=WRONG_TYPE)", "def handleCmdLine(self):\n description = \"Nagios monitoring script to check for open ports\\n\"\n usage = (\"%prog 
<options>\\n\")\n parser = OptionParser(usage=usage, description=description)\n\n parser.add_option(\"-c\", \"--config\",\n type=\"string\",\n help=\"path to open ports configuration file\")\n parser.add_option(\"-l\", \"--list\",\n type=\"string\",\n help=\"supply list of allowed ports seperated by comma.\")\n\n (self.options, args) = parser.parse_args()", "def get_input_file():\n if len(sys.argv) < 3:\n return -1\n return sys.argv[2]", "def main():\n try:\n string = sys.argv[1]\n substring = sys.argv[2]\n\n except IndexError:\n string = None\n substring = None\n\n try:\n sys.argv[3]\n\n except IndexError:\n pass\n\n else:\n print(\" More than expected Number of Arguments\")\n string = None\n substring = None\n\n RabinKarp(string, substring)", "def parseArg(self, c):\n\n\t\tif rocks.app.Application.parseArg(self,c):\n\t\t\treturn 1\n\t\telif c[0] in ('-h', '--help'):\n\t\t\tself.help()\n\t\telif c[0] in ('-x', '--xml'):\n\t\t\tself.xmlname = c[1]\n\t\telse:\n\t\t\treturn 0\n\t\treturn 1", "def parse_cmdline():\n\tparser = ArgumentParser(prog=\"FastP_QC.py\", description=\"\"\"Script collects stats from fastp jsons.\"\"\")\n\tparser.add_argument(\"-r1\", \"--r1_stats\", dest=\"r1_stats\", action=\"store\", required=True, help=\"Text file with r1 stats, from q30.py script.\")\n\tparser.add_argument(\"-r2\", \"--r2_stats\", dest=\"r2_stats\", action=\"store\", required=True, help=\"Text file with r2 stats, from q30.py script.\")\n\tparser.add_argument(\"-n\", \"--name\", dest=\"name\", action=\"store\", required=True, help=\"Sample name\")\n\targs = parser.parse_args()\n\treturn args", "def command_line_arguments():\n\n try:\n parser = argparse.ArgumentParser(description='Log Handler/Cleaner/Copier for Idemia DocAuth')\n\n # Add required arguments.\n parser.add_argument('action', choices=['clean', 'download'], type=str, help='clean or download')\n\n # Parse the arguments\n args = parser.parse_args()\n\n return args\n\n except Exception as err:\n print(err)\n return", "def check_args(lst):\n # Must have at least as first argument the zone file to turn to yaml\n if len(lst) < 3 or not os.path.isfile(lst[1]):\n print \"This script needs at least a valid zone file path as argument\"\n print \"Its usage is\", sys.argv[0], \" zonefile_path domain_name\"\n print \"zonefile_path being the path to the zone file and domain_name the domain name\"\n sys.exit(\"Insufficient Number of Arguments\")", "def handle_cmdline():\n\n cmdline = ArgumentParser(init_args=['address', 'arch', 'file'],\n address_required=True, address_default=None,\n file_required=True,\n file_help='Flash or memory image to inspect',\n formatter_class=RawDescriptionHelpFormatter,\n usage=_USAGE, description=_DESCRIPTION, epilog=_EPILOG)\n\n cmdline.add_argument('--longhelp',\n choices=['Y', 'N'],\n default=None,\n help=_LONGHELP_TEXT)\n\n cmdline.add_argument('--autocomplete',\n choices=['Y', 'N'],\n default=None,\n help=_AUTOCOMPLETE_TEXT)\n\n cmdline.add_argument('--threshold',\n type=int,\n default=5,\n help='Minimum table size to report. 
Default: 5')\n\n cmdline.add_argument('--subcmds',\n action='store_true',\n default=False,\n help='Include sub-command tables in displayed results')\n\n cmdline.add_argument('--details',\n action='store_true',\n default=False,\n help='Display more detailed output')\n\n args = cmdline.parse_args()\n\n if args.longhelp is not None:\n args.longhelp = args.longhelp == 'Y'\n\n if args.autocomplete is not None:\n args.autocomplete = args.autocomplete == 'Y'\n\n return args", "def get_port_arg(self):\n\ttry:\n\t arg = sys.argv[4]\n\t port = int(arg) \n\texcept ValueError:\n\t print \"Port must be a number only.\"\n\t sys.exit(\"Example usage:\\n\\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0\")\t \n\texcept IndexError:\n\t print \"Port number must be provided.\"\n\t sys.exit(\"Example usage:\\n\\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0\")\t \n\tif any([port < 1024, port > 60000]):\n\t print \"Port must be between 1024 and 60000\"\n\t sys.exit(\"Example usage:\\n\\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0\")\t \n\telse:\n\t return port", "def process_check_input_argument():\n\n try:\n input_argv = sys.argv[1]\n if input_argv == \"0\":\n stand_alone_flag = 0\n else:\n stand_alone_flag = 0\n except IndexError:\n stand_alone_flag = 1\n\n return stand_alone_flag", "def getArg(flag):\n try:\n a = sys.argv[sys.argv.index(flag) + 1]\n except:\n return \"\"\n else:\n return a", "def parse_command_line_arguments(command_line_arguments):\n # First determine if a we're loading from a file\n filename = False\n if \"--file\" in command_line_arguments:\n index = command_line_arguments.index(\"--file\")+1\n filename = command_line_arguments[index]\n if output.tracking_suffix in filename:\n # Assumes simulation in progress. So, if final_sweep ==\n # current_sweep, the simulation will load and then\n # immediately end.\n return get_progress_file_info(filename)\n if not (output.output_suffix in filename):\n raise ValueError(\"Can only load from *.boundaryprg2p1 or \"\n +\"*.boundary2p1 files!\")\n # If filename is of type *.boundary2p1, we assume its okay and\n # load from it. 
None of the other command line arguments\n # change.\n\n if \"--select\" in command_line_arguments:\n index = command_line_arguments.index(\"--select\")+1\n if command_line_arguments[index] == \"std\":\n algorithm = monte_carlo.select_for_curvature\n elif command_line_arguments[index] == \"area\":\n algorithm = monte_carlo.select_for_area\n else:\n algorithm = default_algorithm\n else:\n algorithm = default_algorithm \n\n if \"--target-area\" in command_line_arguments:\n index = command_line_arguments.index(\"--target-area\")+1\n target_area = int(eval(command_line_arguments[index]))\n else:\n target_area = int(eval(command_line_arguments[0]))\n\n if \"--target-std\" in command_line_arguments:\n index = command_line_arguments.index(\"--target-std\")+1\n target_std = float(eval(command_line_arguments[index]))\n else:\n target_std = default_target_std\n\n if \"--area-damping\" in command_line_arguments:\n index = command_line_arguments.index(\"--area-damping\")+1\n area_damping_strength = float(eval(command_line_arguments[index]))\n else:\n area_damping_strength = default_area_damping\n if not 0 <= area_damping_strength <= 1:\n raise ValueError(\"Damping must be between 0 and 1.\")\n\n if \"--std-damping\" in command_line_arguments:\n index = command_line_arguments.index(\"--std-damping\")+1\n std_damping_strength = float(eval(command_line_arguments[index]))\n else:\n std_damping_strength = default_std_damping\n if not 0 <= area_damping_strength <= 1:\n raise ValueError(\"Damping must be between 0 and 1.\")\n\n if \"--initial\" in command_line_arguments:\n index = command_line_arguments.index(\"--initial\")+1\n initial_sweep = int(eval(command_line_arguments[index]))\n else:\n initial_sweep = default_initial_sweep\n\n if \"--final\" in command_line_arguments:\n index = command_line_arguments.index(\"--final\")+1\n final_sweep = int(eval(command_line_arguments[index]))\n else:\n final_sweep = default_final_sweep\n\n if \"--save\" in command_line_arguments:\n index = command_line_arguments.index(\"--save\")+1\n save_every_n_sweeps = int(eval(command_line_arguments[index]))\n else:\n save_every_n_sweeps = default_save_every_n_sweeps\n if save_every_n_sweeps < 1:\n raise ValueError(\"You must save at least every 1 sweeps!\")\n\n if \"--v5\" in command_line_arguments:\n index = command_line_arguments.index(\"--v5\")+1\n v5damping = int(eval(command_line_arguments[index]))\n else:\n v5damping = target_area/10\n\n if \"--v6\" in command_line_arguments:\n index = command_line_arguments.index(\"--v6\")+1\n v6damping = int(eval(command_line_arguments[index]))\n else:\n v6damping = target_area/10\n \n if \"--many\" in command_line_arguments:\n if \"--one\" in command_line_arguments or \"--exact\" in command_line_arguments:\n raise ValueError(\"Contradictory input!\")\n if \"--micro\" in command_line_arguments:\n gather_data_function = output.save_many_microscopically_optimal\n else:\n gather_data_function = output.gather_data_to_n_files\n elif \"--one\" in command_line_arguments:\n if \"--many\" in command_line_arguments or \"--exact\" in command_line_arguments:\n raise ValueError(\"Condtradictory input!\")\n if \"--micro\" in command_line_arguments:\n gather_data_function = output.stop_at_microscopically_optimal\n else:\n gather_data_function = output.gather_data_to_1_file\n elif \"--exact\" in command_line_arguments:\n if \"--many\" in command_line_arguments or \"--one\" in command_line_arguments:\n raise ValueError(\"Contradictory input!\")\n gather_data_function = output.generate_n_exact_spheres\n 
index = command_line_arguments.index(\"--exact\")+1\n # In this case, v5damping is fitness_damping, as defined\n # in generate_n_exact_spheres\n v5damping = int(eval(command_line_arguments[index]))\n else:\n if \"--micro\" in command_line_arguments:\n gather_data_function = output.stop_at_microscopically_optimal\n else:\n gather_data_function = output.gather_data_to_1_file\n\n # return a class with all the info we need\n params = parameters(filename, target_area, area_damping_strength,\n target_std, std_damping_strength,\n initial_sweep, final_sweep,\n save_every_n_sweeps,\n v5damping, v6damping,\n algorithm,\n gather_data_function)\n return params", "def validate_args(args):\n if len(args) != 4:\n print(\"ERROR: incorrect length of args.\")\n cli_help_msg()\n sys.exit()\n\n #first argument: check it is a read directory\n if args[1].lower().strip() == 'docid' or args[1].lower().strip() == 'docno':\n print(\"ERROR: incorrect key. Please pass 'docid' or 'docno'\")\n sys.exit()", "def main(args=None):", "def main(args=None):", "def parse_command_line():\n parser = argparse.ArgumentParser()\n\n # All reference encoders\n parser.add_argument(\"--step\", dest=\"step\", default=\"10\", type=int, help=\"step size\")\n parser.add_argument(\"--repeats\", dest=\"repeats\", type=int, default=1, help=\"repeats\")\n\n parser.add_argument(dest=\"image\", default=None,\n help=\"select the test image to run\")\n\n args = parser.parse_args()\n return args", "def read_opts(self):\n\n # process any optlist_ options\n self.valid_opts.check_special_opts(sys.argv)\n\n # ------------------------------------------------------------\n # terminal arguments, first\n\n # cannot have len(argv) <= 1 here, but be consistent with other progs\n if len(sys.argv) <= 1 or '-help' in sys.argv:\n print g_help_string\n return 0\n\n if '-hist' in sys.argv:\n print g_history\n return 0\n\n if '-ver' in sys.argv:\n print g_version\n return 0\n\n if '-show_valid_opts' in sys.argv:\n self.valid_opts.show('', 1)\n return 0\n\n # ------------------------------------------------------------\n # read all user options\n\n self.user_opts = self.OL.read_options(sys.argv, self.valid_opts)\n if not self.user_opts: return 1 # error condition\n\n return None # normal completion", "def getArgs():\r\n parser = argparse.ArgumentParser(\r\n description = \"\"\"This program uses the validation data and a given model to do brain segmentation that will be sent to FeTs challenge to get evaluated \"\"\")\r\n parser.add_argument(\"-d\", type = str, help = \"d is the path to validation dataset, e.g: C:/Documents/MICCAI_FeTS2021_TrainingData/\")\r\n parser.add_argument(\"-m\", type = str, help = \"m is the path for the model to load, e.g: C:/Documents/MICCAI_FeTS2021_TrainingData/cpt/cpt_0_1\")\r\n parser.add_argument(\"-o\", type = str, help = \"o is the output path, e.g: C:/Documents/inferences\")\r\n # Get your arguments\r\n return parser.parse_args()", "def runmain(args):\n\n #case: no extra args, so return help\n if len(args) == 0:\n print USAGE\n return\n\n toolname = args[1]\n if toolname == 'help':\n print USAGE\n return\n\n elif toolname == 'splitdata':\n splitdata(args)\n return\n \n elif toolname == 'aboutdata':\n aboutdata(args)\n return\n\n elif toolname == 'test':\n testffx(args)\n return\n \n else:\n print \"There is no toolname of '%s'.\" % toolname\n print USAGE\n return", "def _cli_validate(self, settings, remaining_argv):\n return None", "def grabRequiredArgs(passed_required_arg,required_arg,required_arg_type):\n \n # Check for correct 
number of arguments\n if len(passed_required_arg) - 1 < len(required_arg):\n print \"Missing arguments(s)\"\n print \"Try --help for options.\"\n sys.exit(1)\n \n # Pull command line values and place in dictionary with correct argument\n # identifier\n required_arg_values = {}\n for i in range(0,len(required_arg)):\n required_arg_values.update([(required_arg[i],passed_required_arg[i+1])])\n \n # Do a few sanity checks on required arguments (using required_arg_type)\n for index, arg in enumerate(required_arg):\n # Check for input file existance\n if required_arg_type[index] == 'inpfile':\n try:\n open(required_arg_values[arg],'r')\n except IOError:\n print \"File '\" + required_arg_values[arg] + \"' not readable!\"\n sys.exit(2)\n \n # Prompt before overwriting output files\n if required_arg_type[index] == 'outfile':\n try:\n open(required_arg_values[arg],'r')\n except IOError:\n continue\n decision = raw_input(\"Warning! File '\" + required_arg_values[arg] \\\n + \"' already exists! overwrite (y/n)? \")\n if decision[0] == 'Y' or decision[1] == 'y':\n os.remove(required_arg_values[arg])\n else:\n sys.exit()\n \n # Check for directory existance\n if required_arg_type[index] == 'outdir':\n try:\n os.listdir(required_arg_values[arg])\n except OSError:\n decision = raw_input(\"Directory '\" + required_arg_values[arg] + \\\n \"' does not exist. Create it (y/n)? \")\n if decision[0] == 'Y' or decision[0] == 'y':\n os.mkdir(required_arg_values[arg])\n else:\n sys.exit()\n \n # If there are any more arguments on the command line, report them and\n # ignore them.\n if len(passed_required_arg) - 1 > len(required_arg):\n print \"Trailing arguments (not evaluated):\"\n for trailer in passed_required_arg[len(required_arg):]:\n print trailer\n \n return required_arg_values", "def check_input_args(in_arg, phase=\"train\"):\n\n if phase==\"train\": \n # Check that flowers directory exists \n if not path.isdir(in_arg.data_dir):\n print(\"For data loading: can't find directory '{}' starting from '{}'. Please check the paths and run again!\" . format(in_arg.data_dir, os.getcwd()))\n sys.exit(0)\n \n # Check that checkpoints directory exists\n if not path.isdir(in_arg.save_dir):\n print(\"For checkpoints saving: can't find directory '{}' starting from '{}'. Please check the paths and run again!\" . format(in_arg.save_dir, os.getcwd()))\n sys.exit(0) \n \n else:\n # phase == predict\n # Check that the flower name exists. Example: \"/data/flowers/test/25/image_06583.jpg\"\n if not path.isfile(in_arg.image_file):\n print(\"Image file: can't find file '{}' starting from '{}'. Please check the path, filename and run again!\" . format(in_arg.image_file, os.getcwd()))\n sys.exit(0) \n \n if not path.isfile(in_arg.checkpoint):\n print(\"Checkpoint file: can't find file '{}' starting from '{}'. Please check the path, filename and run again!\" . format(in_arg.checkpoint, os.getcwd()))\n sys.exit(0)\n \n if in_arg.category_names and not path.isfile(in_arg.category_names):\n print(\"Category names file: can't find file '{}' starting from '{}'. Please check the path, filename and run again!\" . format(in_arg.category_names, os.getcwd()))\n sys.exit(0) \n \n # All cases\n\n # Check that the architecture is supported\n if in_arg.arch not in ['alexnet', 'resnet18', 'vgg19_bn']:\n print(\"Architecture can only be: alexnet, resnet18 or vgg19_bn. 
Please check the architecture and run again!\")\n sys.exit(0) \n \n # Check that a valid value has been set for gpu\n if in_arg.gpu != 0 and in_arg.gpu != 1:\n print(\"GPU can only be set to 0 (disable) or 1 (enable)! Please check the value and run again!\")\n sys.exit(0)", "def checkCommandArgs():\n try:\n int(sys.argv[1]) #sin\n int(sys.argv[2]) #sout\n int(sys.argv[3]) #csin\n except (ValueError, IndexError) as e:\n print (\"One or more port numbers are not ints or were not entered\")\n sys.exit()\n \n for i in range(3):\n if int(sys.argv[i+1]) > PORT_RANGE_UPPER or int(sys.argv[i+1]) < PORT_RANGE_LOWER:\n print(\"One or more port number out of range\")\n sys.exit()\n \n if not os.path.isfile(sys.argv[4]):\n print(\"file does not exist\")\n sys.exit()\n \n return int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]), sys.argv[4]", "def test_type_name(self, parse_input):\n bb = parse_input(\"name testname\\nversion 1.0\\ntype type\")\n assert bb.programtype[\"name\"] == \"type\"", "def run(self):\n if len(sys.argv[1:]) == 0:\n arg_parse.print_help()\n arg_parse.exit()\n\n try:\n if args.unix:\n self.from_unix_sec()\n print (\"Unix Seconds: \" + self.in_unix_sec + \" UTC\")\n elif args.umil:\n self.from_unix_milli()\n print (\"Unix Milliseconds: \" + self.in_unix_milli + \" UTC\")\n elif args.wh:\n self.from_win_64_hex()\n print (\"Windows 64 bit Hex BE: \" + self.in_windows_hex_64 + \" UTC\")\n elif args.whle:\n self.from_win_64_hexle()\n print (\"Windows 64 bit Hex LE: \" + self.in_windows_hex_le + \" UTC\")\n elif args.goog:\n self.from_chrome()\n print (\"Google Chrome Time: \" + self.in_chrome + \" UTC\")\n elif args.active:\n self.from_ad()\n print (\"Active Directory Timestamp: \" + self.in_ad + \" UTC\")\n elif args.uhbe:\n self.from_unix_hex_32be()\n print (\"Unix Hex 32 bit BE: \" + self.in_unix_hex_32 + \" UTC\")\n elif args.uhle:\n self.from_unix_hex_32le()\n print (\"Unix Hex 32 bit LE: \" + self.in_unix_hex_32le + \" UTC\")\n elif args.cookie:\n self.from_cookie()\n print (\"Windows Cookie Date: \" + self.in_cookie + \" UTC\")\n elif args.oleb:\n self.from_ole_be()\n print (\"Windows OLE 64 bit double BE: \" + self.in_ole_be + \" UTC\")\n elif args.olel:\n self.from_ole_le()\n print (\"Windows OLE 64 bit double LE: \" + self.in_ole_le + \" UTC\")\n elif args.mac:\n self.from_mac()\n print (\"Mac Absolute Time: \" + self.in_mac + \" UTC\")\n elif args.hfsdec:\n self.from_hfs_dec()\n print (\"Mac OS/HFS+ Decimal Date: \" + self.in_hfs_dec + \" UTC\")\n elif args.hfsbe:\n self.from_hfs_be()\n print (\"HFS/HFS+ 32 bit Hex BE: \" + self.in_hfs_be + \" HFS Local / HFS+ UTC\")\n elif args.hfsle:\n self.from_hfs_le()\n print (\"HFS/HFS+ 32 big Hex LE: \" + self.in_hfs_le + \" HFS Local / HFS+ UTC\")\n elif args.msdos:\n self.from_msdos()\n print (\"MS-DOS 32 bit Hex Value: \" + self.in_msdos + \" Local\")\n elif args.fat:\n self.from_fat()\n print (\"FAT Date + Time: \" + self.in_fat + \" Local\")\n elif args.sys:\n self.from_systime()\n print (\"Microsoft 128 bit SYSTEMTIME: \" + self.in_systemtime + \" UTC\")\n elif args.ft:\n self.from_filetime()\n print (\"Microsoft FILETIME/LDAP time: \" + self.in_filetime + \" UTC\")\n elif args.pr:\n self.from_prtime()\n print (\"Mozilla PRTime: \" + self.in_prtime + \" UTC\")\n elif args.auto:\n self.from_ole_auto()\n print (\"OLE Automation Date: \" + self.in_ole_auto + \" UTC\")\n elif args.ios:\n self.from_ios_time()\n print (\"iOS 11 Date: \" + self.in_iostime)\n elif args.sym:\n self.from_sym_time()\n print (\"Symantec AV Timestamp: \" + 
self.in_symtime)\n elif args.gps:\n self.from_gps_time()\n print (\"GPS Timestamp: \" + self.in_gpstime)\n elif args.timestamp:\n self.to_timestamps()\n elif args.guess:\n self.from_all()\n except Exception as e:\n logging.error(str(type(e)) + \",\" + str(e))", "def parse_arguments():\n parser = argparse.ArgumentParser(description=\"Parse library type information.\")\n parser.add_argument(\"input_file\", help=\"Salmon library type information file.\")\n return parser.parse_args()", "def getopts():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input\", type=argparse.FileType('r'),\n required=True, help=\"input file (.csv)\")\n return parser.parse_args()", "def test_invalidargs(clickrunner):\n for args in maincli.invalid_args:\n result = clickrunner.invoke(maincli.entrypoint, args)\n assert result.exit_code == 2\n assert \"no such option\" in result.output", "def parse_command_line_arguments(argv):\n print(\"reading command line arguments in...\")\n\n parser = argparse.ArgumentParser(description='Description of your program')\n parser.add_argument('-i', '--input', help='Location of input csv file', required=True)\n parser.add_argument('-p', '--predicting', help='The column name containing the category to predict', required=True)\n parser.add_argument('-s', '--scoring', help='The scoring type to be used with model evaluation', required=False)\n parser.add_argument('-c', '--scale', help='List of column names to scale values for', nargs='+', required=False)\n args = parser.parse_args()\n\n return args.input, args.predicting, args.scoring, args.scale", "def test_cli_unknown(run):\n\n out, err, mocked_input = run(\n dork.cli.main, '-?', input_side_effect=['tester', '.rq'])\n assert 'Greetings' in out\n assert err == \"\"\n assert mocked_input.call_count == 2", "def conf_load_run_specification(fin):\n err_msg = \"Unknown specification. Excpected RUN:'name'.\"\n spec = fin.readline().strip().split(':')\n if len(spec) != 2 or spec[0] != 'RUN':\n raise EnvironmentError(err_msg)\n name = spec[1].strip()\n if len(name) == 0:\n raise EnvironmentError(\"Excpected non empty name for RUN(RUN:'name').\")\n return name", "def _get_input_from_argv():\n payload_index = sys.argv.index('--') + 1\n params = sys.argv[payload_index:]\n if not params:\n raise ValueError(\n \"A JSON payload was expected after the -- delimiter, but none \"\n \"was found.\")\n return ' '.join(params)", "def handle_cmdline_args():\n\n parser = argparse.ArgumentParser(\n description='Generate synthetic data from a specification in a json '\n 'file using the \"synth-method\" described in the json file. ')\n\n parser.add_argument(\n '-i', dest='infile', required=True,\n help='The input json file. 
Must contain a \"synth-method\" property')\n\n parser.add_argument(\n '-o', dest='outfile_prefix', required=True, help='The prefix of the output paths (data json and csv), relative to the QUIPP-pipeline root directory')\n\n args = parser.parse_args()\n return args", "def readCommand( argv ):\n from optparse import OptionParser\n usageStr = \"\"\"\n USAGE: python wumpus.py <options>\n EXAMPLES: python wumpus.py --agent DummyAgent\n OR python wumpus.py -a DummyAgent\n - run wumpus with the dummy agent\n \"\"\"\n parser = OptionParser(usageStr)\n \n parser.add_option('-a', '--agent', dest = 'agent',\n help = default('the agent to use'),\n metavar = 'TYPE', default = 'DummyAgent')\n parser.add_option('-w', '--width', dest ='width',\n help = default('World width'), default = 4)\n parser.add_option('-s', '--speed', dest ='speed',\n help = default('Speed'), default = 70)\n parser.add_option('-t', '--timeout', dest='timeout',\n help = default('Maximum search time (for debugging purpose)'), default = 2000)\n parser.add_option('-g', '--debugging', dest = 'debugging',\n help = 'For debuging purpose, set the random seed which generates the same world with the same seed', default = -1)\n \n options, otherjunk = parser.parse_args(argv)\n\n if len(otherjunk) != 0:\n raise Exception('Command line input not understood: ' + str(otherjunk))\n args = dict()\n \n # Choose a Wumpus solver\n try:\n module = __import__('agent')\n if options.agent in dir(module):\n agent = getattr(module, options.agent)\n args['agent'] = agent()\n else:\n raise Exception('Unknown agent: ' + options.agent)\n except ImportError:\n raise Exception('No file agent.py')\n \n args['width'] = int(options.width) + 2 # Add the borders.\n args['speed'] = int(options.speed)\n args['timeout'] = int(options.timeout)\n args['debugging'] = options.debugging\n\n return args", "def parse_args(self):\n #-----------------------------------------------------------------------\n #This code is based on code from the KR Toolkit by Christian Muise\n #URL: http://code.google.com/p/krtoolkit/\n try:\n argv, opts, flags = sys.argv[1:], {}, []\n while argv:\n if argv[0][0:2] == '--':\n flags.append(argv[0])\n argv = argv[1:]\n elif argv[0][0] == '-':\n opts[argv[0]] = argv[1]\n argv = argv[2:]\n else:\n raise InputException(\"Badly constructed arg: \" +argv[0])\n except IndexError:\n raise InputException(\"Badly constructed arg: \" + argv[0])\n #-----------------------------------------------------------------------\n for flag in flags:\n if flag in self.program_flags:\n vars(self)[self.program_flags[flag].var_name] = True\n if self.program_flags[flag].function:\n self.program_flags[flag].function(self)\n else:\n raise InputException(\"Invalid flag: \" + flag)\n \n if not self.quiet:\n min_width = max(len('Flags:'),\n max(map(lambda x : len(x.description),\n self.program_args.itervalues()))) + 1\n if len(flags) == 0:\n print \"{:<{}} {}\".format('Flags:', min_width,'<None>')\n else:\n print \"{:<{}} {}\".format('Flags:', min_width,\n ', '.join(filter(lambda f : f in flags,\n self.program_flags)))\n \n for arg in opts:\n if arg not in self.program_args:\n raise InputException(\"Invalid arg: \" + arg)\n \n for arg in self.program_arg_order:\n arg_def = self.program_args[arg]\n if arg not in opts:\n if arg_def.needed:\n raise InputException(\"Error needed arg is missing: \" + arg)\n vars(self)[arg_def.var_name] = arg_def.default_value\n else:\n if arg_def.validator == None:\n vars(self)[arg_def.var_name] = opts[arg]\n else:\n vars(self)[arg_def.var_name] = 
arg_def.validator(opts[arg],\n arg_def.validator_args)\n if not self.quiet:\n print \"{:<{}} {}\".format(arg_def.description + ':', min_width,\n vars(self)[arg_def.var_name])", "def parsare_argumente():\n for arg in sys.argv:\n if arg == \"-h\":\n display_usage()\n\n in_dir=\"input\"\n out_dir=\"output\"\n n=3\n timeout=10\n for arg in sys.argv[1:]:\n check = arg.split(\"=\")\n if len(check) < 2:\n print(\"invalid\")\n exit()\n if check[0] == \"if\":\n in_dir = ''.join(check[1:])\n elif check[0] == \"of\":\n out_dir = ''.join(check[1:])\n elif check[0] == 'n':\n try:\n n = int(''.join(check[1:]))\n except ValueError:\n print(\"nr invalid\")\n display_usage()\n elif check[0] == 't':\n try:\n timeout = int(''.join(check[1:]))\n except ValueError:\n print(\"nr invalid\")\n display_usage()\n\n return [in_dir, out_dir, n, timeout]", "def readCommand( argv ):\n from optparse import OptionParser\n usageStr = \"\"\"\n USAGE: python wumpus.py <options>\n EXAMPLES: (1) python wumpus.py\n - starts simple manual Hunt The Wumpus game\n (2) python wumpus.py -k OR python wumpus.py --kb\n - starts simple manual Hunt The Wumpus game with\n knowledge base and interactive queries possible\n \"\"\"\n parser = OptionParser(usageStr)\n\n parser.add_option('-k', '--kb', action='store_true', dest='kb', default=False,\n help=default(\"Instantiate a queriable knowledge base\"))\n parser.add_option('-y', '--hybrid', action='store_true', dest='hybrid', default=False,\n help=default(\"Run hybrid wumpus agent\" \\\n + \" (takes precedence over -k option)\"))\n parser.add_option('-l', '--layout', dest='layout', default=None,\n help=default(\"Load layout file\"))\n\n parser.add_option('-t', '--test', action='store_true', dest='test_minisat',\n default=False,\n help=default(\"Test connection to command-line MiniSat\"))\n\n options, otherjunk = parser.parse_args(argv)\n \n if len(otherjunk) != 0:\n raise Exception(\"Command line input not understood: \" + str(otherjunk))\n\n return options", "def parse_args():\n\n product_help = ('The type of output products to process. Can be '\n '\"individual\", \"composite\", or \"both\".')\n\n parser = argparse.ArgumentParser()\n parser.add_argument('product_type', action='store', type=str, help=product_help)\n args = parser.parse_args()\n\n # Make sure the argument is a valid option\n valid_options = ['individual', 'composite', 'both']\n explanation = '{} is not a valid option. Please choose \"individual\", \"composite\", or \"both\".'.format(args.product_type)\n assert args.product_type in valid_options, explanation\n\n return args", "def parseInputLine(self, line):\r\n if line is not None and line is not '':\r\n func = getattr(self, 'cmd_' + line.split()[0].upper(), None)\r\n if func is not None:\r\n func(line.split()[1:])\r\n else:\r\n self.terminal.write('No such command')\r\n self.showPrompt()" ]
[ "0.7166374", "0.67722404", "0.6072575", "0.60625833", "0.60188425", "0.5963004", "0.593148", "0.5922895", "0.59138274", "0.5889174", "0.5888878", "0.58836305", "0.5879857", "0.58746564", "0.5859843", "0.5856361", "0.5829581", "0.58261716", "0.58230263", "0.5807125", "0.5805372", "0.5757864", "0.5718999", "0.5717036", "0.569504", "0.5692562", "0.56755054", "0.56663656", "0.56654745", "0.56611365", "0.564919", "0.56489664", "0.562811", "0.56153977", "0.5614497", "0.5598462", "0.55952525", "0.5591032", "0.5591032", "0.5576977", "0.55755657", "0.5569013", "0.55526257", "0.5551375", "0.5546564", "0.5546538", "0.5546538", "0.55330896", "0.5532514", "0.5530876", "0.5529703", "0.5528288", "0.5522718", "0.5515282", "0.5514953", "0.55053383", "0.55053383", "0.5505108", "0.55030286", "0.5498748", "0.54936844", "0.5472797", "0.5464812", "0.5447572", "0.5442971", "0.5433334", "0.54303557", "0.54277474", "0.5422482", "0.5421113", "0.54182786", "0.54171807", "0.54166865", "0.54120076", "0.54084784", "0.54037094", "0.54037094", "0.54013616", "0.5400634", "0.5399884", "0.53986686", "0.5396497", "0.5395713", "0.53903335", "0.53850603", "0.538482", "0.5380289", "0.5379048", "0.5378603", "0.5376076", "0.53661335", "0.53645283", "0.536348", "0.5359537", "0.534749", "0.5341946", "0.533852", "0.53220135", "0.5314182", "0.53109", "0.5309988" ]
0.0
-1
Returns the week of the month for the specified date.
def week_of_month(dt):
    try:
        first_day = dt.replace(day=1)
        dom = dt.day
        if first_day.weekday() == 6:
            adjusted_dom = dom + day_of_week(dt) - 1
        else:
            adjusted_dom = dom + day_of_week(dt)
        return int(ceil(adjusted_dom/7.0))
    except Exception as e:
        log.exception("1;EME;FAILURE;700; FUNCTION ERROR " + str(e), exc_info=False)
        sys.exit(0)
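For context, a minimal self-contained sketch of the same week-of-month technique (an illustration, not taken from this record): it swaps the record's custom day_of_week helper and logging for the standard datetime.weekday(), which is enough to show the offset-and-ceil idea. The names below are assumptions for the example.

# Simplified sketch: offset the day of month by the weekday of the 1st,
# then split into 7-day buckets.
from datetime import date
from math import ceil

def simple_week_of_month(dt):
    first_day = dt.replace(day=1)
    adjusted_dom = dt.day + first_day.weekday()  # weekday(): Monday == 0
    return int(ceil(adjusted_dom / 7.0))

print(simple_week_of_month(date(2023, 9, 15)))  # -> 3 (15 Sep 2023 falls in week 3)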
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_week_from_date(date) -> int:\n month, year = date.month, date.year\n if month < 4:\n year -= 1\n ld = _labor_day(year)\n wk1_wed = ld + timedelta(days=2)\n days_since = (date - wk1_wed).days\n weeks_since = days_since / 7.\n week = math.floor(weeks_since) + 1\n return int(week)", "def get_week(date):\n\n # TODO: the API seems broken. It returns week, year not year, week as documentef\n # why not use date.isocalendar() from the stdlib?\n\n date = date_trunc('week', date)\n\n first_monday = date_trunc('week', date_trunc('year', date))\n if first_monday.year < date.year:\n first_monday += datetime.timedelta(weeks=1)\n diff = date_trunc('day', date) - first_monday\n week = 1 + (diff.days / 7)\n return week, first_monday.year", "def week_of_month(dt):\n\n first_day = dt.replace(day=1)\n\n dom = dt.day\n adjusted_dom = dom + first_day.weekday()\n\n return int(ceil(adjusted_dom/7.0))", "def GetWeekNum(self, date):\n (y, m, d) = date.split('-')\n return (dt.date(int(y), int(m), int(d)) - self.START_DATE).days / 7", "def date_to_week(y, m, d):\r\n return datetime.datetime(y, m, d).strftime(r'%YW%W')", "def weeks_of_the_month(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'WeekNumber']]]]]:\n return pulumi.get(self, \"weeks_of_the_month\")", "def date_to_day_of_week(date):\n return date.weekday()", "def current_week_number(date=datetime.datetime.now()):\n return int(date.strftime(\"%W\"))", "def weeks_of_the_month(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"weeks_of_the_month\")", "def date_day_of_week(date):\n day_of_week = date.strftime('%A')\n return day_of_week", "def get_weekday_number(date):\n return date.strftime('%w')", "def ISOWEEKNUM(date):\n return _make_datetime(date).isocalendar()[1]", "def get_next_monday(date):\n return date + datetime.timedelta(days=-date.weekday(), weeks=1)", "def get_weeks_for_month(month=None):\n if not month:\n month = dt.now().month\n return Week.objects.filter(date__month=month).order_by('number')", "def WEEKDAY(date, return_type=1):\n if return_type not in _weekday_type_map:\n raise ValueError(\"Invalid return type %s\" % (return_type,))\n (first, index) = _weekday_type_map[return_type]\n return (_make_datetime(date).weekday() - first) % 7 + index", "def ISOWEEKNUM(\n date: func_xltypes.XlDateTime\n) -> func_xltypes.XlNumber:\n\n datetime_date = utils.number_to_datetime(int(date))\n isoweeknum = datetime_date.isocalendar()[1]\n return isoweeknum", "def day_of_week(date: datetime) -> str:\n weekday = date.weekday()\n return calendar.day_name[weekday]", "def get_week_date():\n return timezone.now()+timezone.timedelta(days=6)", "def dateweek(line, date):\r\n\tindex = datetime.weekday(date)\r\n\tdateweek = '%s%s%s' % (date.day, cn2en.DATE_WEEK, cn2en.WEEKDAYS[index])\r\n\t\r\n\treturn dateweek == line", "def day_of_week(day, month, year):\n bias = (14 - month) // 12\n m_year = year - bias\n mth = month + 12 * bias - 2\n return (day + m_year + m_year // 4 - m_year // 100 + m_year // 400 + (31 * mth) // 12) % 7", "def get_week_from_datestr(datestr: str) -> int:\n return date.fromisoformat(datestr).isocalendar()[1]", "def current_week() -> int:\n now = datetime.now()\n return get_week_from_date(now)", "def WEEKNUM(date, return_type=1):\n if return_type == 21:\n return ISOWEEKNUM(date)\n if return_type not in _weekday_type_map:\n raise ValueError(\"Invalid return type %s\" % (return_type,))\n (first, index) = _weekday_type_map[return_type]\n date = _make_datetime(date)\n jan1 = datetime.datetime(date.year, 1, 1)\n 
week1_start = jan1 - datetime.timedelta(days=(jan1.weekday() - first) % 7)\n return (date - week1_start).days // 7 + 1", "def date_to_dow(y, m, d):\r\n # Python uses Monday week start, so wrap around\r\n w = calendar.weekday(y, m, d) + 1\r\n if w == 7:\r\n w = 0\r\n return w", "def find_date(startdate, weekday, weeknumber):\n import datetime\n # The +1 makes this match up with linux times (day 1 = Monday)\n daysahead = weekday - (startdate.weekday() + 1)\n if daysahead < 0:\n # Target day already happened this week\n daysahead += 7\n # Add 7 days for each Week Of Month we want - but 'This' week is week 1\n daysahead += 7 * (weeknumber - 1)\n return startdate + datetime.timedelta(daysahead)", "def date_month(date):\n return date.month", "def date_week_of_year(date, *, sunday_is_first_day_of_week: bool = False):\n if sunday_is_first_day_of_week:\n return date.strftime(\"%U\")\n else:\n return date.strftime(\"%V\")", "def MONTH(date):\n return _make_datetime(date).month", "def week_start_on_monday(weekday):\n return (weekday - 1 + 6) % 7 + 1", "def day_of_week():\n return calendar.day_name[datetime.date.today().weekday()]", "def date_day_of_month(date):\n return date.day", "def week(self):\n J = self.JulianDay()\n d4 = (J + 31741 - (J % 7)) % 146097 % 36524 % 1461\n L = d4 // 1460\n d1 = ((d4 - L) % 365) + L\n return d1 // 7 + 1", "def date_day(date):\n return date_day_of_month(date)", "def wkday_on_first(yr, mon): # returns day of week of first of month of the given year (1/1/2016)\r\n TotalDays = 0\r\n for x in range(1754, yr):\r\n YearNum = yeardays(x)\r\n TotalDays += YearNum\r\n for x in range(1, mon):\r\n MonNum = monthdays(yr, x)\r\n TotalDays += MonNum\r\n WhatDayNum = TotalDays % 7\r\n WhatDay = [\"Tues\", \"Wedn\", \"Thu\", \"Fri\", \"Sat\", \"Mon\"]\r\n return WhatDay[WhatDayNum]", "def week_range(date):\n # isocalendar calculates the year, week of the year, and day of the week.\n # dow is Mon = 1, Sat = 6, Sun = 7\n year, week, dow = date.isocalendar()\n\n # Find the first day of the week.\n if dow == 7:\n # Since we want to start with Sunday, let's test for that condition.\n start_date = date\n else:\n # Otherwise, subtract `dow` number days to get the first day\n start_date = date - timedelta(dow)\n\n return start_date, start_date + timedelta(6)", "def dayofweek(day, month, year, formatresult=True):\n if formatresult is False:\n return calendar.weekday(year, month, day) + 1\n days = {\n 0: 'Monday',\n 1: \"Tuesday\",\n 2: \"Wednesday\",\n 3: \"Thursday\",\n 4: \"Friday\",\n 5: \"Saturday\",\n 6: \"Sunday\"\n }\n return days[calendar.weekday(year, month, day)]", "def DayOfWeek(year, month, day):\n num = year * 365\n num = num + year // 4 + 1\n num = num - (year // 100 + 1)\n num = num + year // 400 + 1\n if month < 3 and LeapYear(year):\n num = num - 1\n return (num + MONTH_OFFSETS[month - 1] + day + 4) % 7 + 1", "def Day_of_week(day, month, year):\r\n if year % 4 == 0 and (year % 400 == 0 or year % 100 != 0):\r\n doomsday = [11, 29, 21, 4, 9, 6, 11, 8, 5, 10, 7, 12]\r\n else:\r\n doomsday = [10, 28, 21, 4, 9, 6, 11, 8, 5, 10, 7, 12]\r\n exact_day = ((day - doomsday[month-1]) + Dooms_day(year)) % 7\r\n character_day = [\"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \r\n \"Friday\", \"Saturday\"]\r\n return character_day[exact_day]", "def get_week_start(x: Optional[Date] = None) -> Date:\n asof = x or get_today()\n return asof - TimeDelta(days=(asof.isoweekday() - 1) % 7)", "def the_week_url():\n return '/timeline/%d/%02d/%d/' % \\\n (datetime.now().year, 
datetime.now().month, timekit.monthweek(datetime.now()))", "def get_week():\n now = dt.now().date()\n return Week.objects.filter(date__lte=now).order_by('-date').first()", "def day_of_the_week(arg):", "def date_with_day_of_week_appended(mydate): \n import datetime\n month, day, year = (int(x) for x in mydate.split('/')) \n shortened_year = abs(year) % 100 \n day_of_week = datetime.date(year, month, day).strftime(\"%A\")\n return \"%s/%s/%s %s\" % (month,day,shortened_year, day_of_week)", "def weekly():", "def week(start_day=\"monday\"):\r\n today = datetime.datetime.combine(datetime.date.today(), datetime.datetime.min.time())\r\n weekday = today.weekday()\r\n week = list(calendar.Calendar(getattr(calendar, start_day.upper())).iterweekdays())\r\n pos = week.index(weekday)\r\n week_stamp = (to_stamp(today - datetime.timedelta(days=pos - i)) for i, d in enumerate(week))\r\n return collections.OrderedDict(((calendar.day_name[a], (b, b + DAY)) for a, b in zip(week, week_stamp)))", "def week(self):\n if self._week.lower() == 'wild card':\n return WILD_CARD\n if self._week.lower() == 'division':\n return DIVISION\n if self._week.lower() == 'conf. champ.':\n return CONF_CHAMPIONSHIP\n if self._week.lower() == 'superbowl':\n return SUPER_BOWL\n return self._week", "def weekday(self):\n\n return func.extract('dow', self.start_date) + 1", "def monday_last_week():\n today = datetime.date.today()\n last_week = today - datetime.timedelta(days=7)\n return last_week - datetime.timedelta(days=(last_week.isoweekday() - 1))", "def day_of_week(self):\n # 1 Jan 0001 was Monday according to the proleptic Gregorian calendar.\n # So, 1 Jan 0001 has ordinal 1, and the weekday is 0.\n return (self._ordinals - 1) % 7", "def weekday(self):\n if self.month is not None and self.day is not None:\n return self.todate().weekday()\n else:\n return None", "def _get_date_in_words(number_date):\n month_list = ['Jan ', 'Feb ', 'Mar ', 'Apr ', 'May ', 'Jun ', 'Jul ', 'Aug ', 'Sep ', 'Oct ', 'Nov ', 'Dec ']\n month = int(number_date[5:7]) - 1\n day = str(int(number_date[-2:]))\n return month_list[month] + day", "def get_today_week_number(self):\n\n today = date.today()\n iso_result = today.isocalendar()\n return iso_result[1]", "def weekdayname(self, date):\n weekday = weekdayname_msgid(date.dow())\n return translate(weekday, domain='plonelocales',\n context=self.request, default=weekday)", "def first_monday_of_week(year, week):\n weekyear = \"{} {} 1\".format(year, week)\n return time.asctime(time.strptime(weekyear, \"%Y %U %w\"))", "def _set_date_weekly(self):\n dt_weekday = dt.now()\n try:\n dt_weekday = self._get_datetime_or_error()\n except ValueError:\n self._dt_string = \"\"\n raise InvalidDateError(detail={\n \"message\": \"Invalid Date Provided\",\n \"period\": self.period.value,\n \"date\": self._given_date\n })\n week_start = dt_weekday - timedelta(days=dt_weekday.weekday())\n self.date['year'] = week_start.year\n self.date['month'] = week_start.month\n self.date['day'] = week_start.day", "def get_current_week(self):\n result = self._method_call(\"CurrentWeek\")\n return int(result)", "def day_of_week(self) -> pulumi.Input[Union[str, 'WeekDay']]:\n return pulumi.get(self, \"day_of_week\")", "def day_of_week(self) -> pulumi.Input[Union[str, 'WeekDay']]:\n return pulumi.get(self, \"day_of_week\")", "def day_of_week(self) -> str:\n return pulumi.get(self, \"day_of_week\")", "def day_of_week(self) -> str:\n return pulumi.get(self, \"day_of_week\")", "def week_of_month(self, week_of_month):\n allowed_values = [\"first\", 
\"second\", \"third\", \"fourth\", \"last\"] # noqa: E501\n if week_of_month not in allowed_values:\n raise ValueError(\n \"Invalid value for `week_of_month` ({0}), must be one of {1}\" # noqa: E501\n .format(week_of_month, allowed_values)\n )\n\n self._week_of_month = week_of_month", "def day_of_week(self):\n day_of_week_names = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',\n 'Friday', 'Saturday', 'Sunday']\n diff = self.diff(Date(1, 1, 1970)) + 3\n while diff < 0:\n diff += 7\n print(day_of_week_names[diff % 7])", "def get_week(time_index):\n return np.array(time_index.week).reshape(-1,1)", "def forecast_weekly():\n forecast = get_forecast()\n daily = forecast.daily()\n return daily.summary", "def get_current_week_range(self, currdate):\n dow_start = datetime.datetime.strftime(currdate, '%w')\n if dow_start == '0':\n week_start = currdate\n else:\n week_start = self.get_previous_byday('Sunday', currdate)\n\n week_end = week_start + datetime.timedelta(days=6)\n return (week_start, week_end)", "def getCurrentWeek(self):\n return self.wcount % 48", "def main():\n print(day_of_week(datetime.now()))\n print(day_of_week(datetime(2019, 7, 4)))\n print(day_of_week(datetime(2013, 12, 25)))\n print(day_of_week(datetime(2000, 1, 1)))", "def day_of_month():\n return datetime.date.today().day", "def dow(self):\n comparator = Date(11, 12, 2014) # known to be a 'Wednesday'\n DOW = ['Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday', 'Monday', 'Tuesday']\n diff = self.diff(comparator)\n return DOW[diff % 7]", "def set_week_day(self, wday):\r\n\t\twdays = ['Domingo', 'Lunes', 'Martes', 'Miercoles',\r\n\t\t\t\t 'Jueves', 'Viernes', 'Sabado']\r\n\t\tfor i in range(7):\r\n\t\t\tif wday == i: \r\n\t\t\t\treturn wdays[i]", "def meetup_day(year, month, day_of_week, day_occurrence):\n \n cal = calendar.monthcalendar(year, month)\n day_of_week_index = days_of_week[day_of_week]\n \n not_teenth = day_occurrence != 'teenth'\n day_is_in_first_week = cal[0][day_of_week_index] != 0\n \n if not_teenth and day_is_in_first_week:\n week_index = week_indices[day_occurrence]\n \n elif not_teenth and not day_is_in_first_week:\n week_index = week_indices[day_occurrence] + 1\n \n else:\n for i in range(len(cal)):\n if cal[i][day_of_week_index] >= 10:\n week_index = i\n break\n\n date = cal[week_index][day_of_week_index]\n return datetime.date(year, month, date)", "def meetup_day(year, month, dow, wom):\n first_dow = monthrange(year, month)[0]\n days_in_month = monthrange(year, month)[1]\n possible_dates = []\n print str(year) + str(month) + dow + wom\n\n \"\"\"Build dictionary of possible dates based on dow\"\"\"\n for day in range(1, days_in_month+1):\n if datetime.date(year, month, day).strftime(\"%A\") == dow:\n print day\n possible_dates.extend([day])\n\n \"\"\"Perform logic on wom constraint\"\"\"\n if wom == \"teenth\":\n for day in possible_dates:\n if day > 12 and day < 20:\n return datetime.date(year, month, day)\n elif wom == \"last\":\n return datetime.date(year, month, possible_dates[-1])\n else:\n return datetime.date(year, month, possible_dates[ int(wom[:1]) - 1 ])", "def dates_of_the_week():\n date_list = list()\n now = datetime.datetime.now()\n monday = now - datetime.timedelta(days=now.weekday(), hours=now.hour, minutes=now.minute, seconds=now.second,\n microseconds=now.microsecond)\n date_list.append(monday)\n for each in range(1, 6):\n monday = monday + datetime.timedelta(days=1)\n date_list.append(monday)\n date_list.append((monday + datetime.timedelta(days=1, hours=23, minutes=59, seconds=59)))\n 
return date_list", "def week(self) -> Index:\n warnings.warn(\n \"`week` is deprecated in 3.5.0 and will be removed in 4.0.0.\",\n FutureWarning,\n )\n return Index(self.to_series().dt.week)", "def weekNumber(self): # real signature unknown; restored from __doc__\r\n pass", "def _compute_week_index(self, column_index):\n latest_week = self.end_date.isocalendar()[1]\n column_date = self.start_date + timedelta(column_index)\n week = column_date.isocalendar()[1]\n week_index = latest_week - week\n return week_index", "def GetWeekDay(self):\n if self.day is None:\n if self.week:\n return (\n self.century,\n self.year //\n 10,\n self.year %\n 10,\n self.week,\n None)\n elif self.month is None:\n if self.year is None:\n return (self.century, None, None, None, None)\n else:\n return (\n self.century,\n self.year //\n 10,\n self.year %\n 10,\n None,\n None)\n else:\n raise DateTimeError(\"can't get week day with month precision\")\n else:\n century, year, ordinalDay = self.GetOrdinalDay()\n year += century * 100\n if LeapYear(year):\n yearLength = 366\n else:\n yearLength = 365\n weekday = DayOfWeek(year, self.month, self.day)\n thursday = ordinalDay + 4 - weekday\n if thursday < 1:\n # Thursday this week was actually last year, and so we are\n # part of the last calendar week of last year too.\n # may return year==0\n year -= 1\n week = WeekCount(year)\n elif thursday > yearLength:\n # Thursday this week is actually next year, and so we are\n # part of the first calendar week of next year too.\n # may return century=100\n year += 1\n week = 1\n else:\n # We are part of this year, but which week?\t Jan 4th is always\n # part of the first week of the year, so we calculate the ordinal\n # value of the Monay that began that week\n yearBase = 5 - DayOfWeek(year, 1, 4)\n week = (ordinalDay - yearBase) // 7 + 1\n return year // 100, (year % 100) // 10, (year % 10), week, weekday", "def find_day_of_week(year, month, day_of_week, offset=0, use_datetime=False):\n iter = Calendar().itermonthdates(year, month)\n n = 0\n\n for value in iter:\n if month != value.month:\n continue\n\n if day_of_week == weekday(value.year, value.month, value.day):\n if n == offset:\n return convert_date_to_datetime(value) if use_datetime else value\n else:\n n += 1\n\n return None", "def day_of_week(self) -> str:\n return self.elements[4]", "def get_next_week(self, startdate):\n dow_today = int(datetime.datetime.strftime(startdate, '%w'))\n days_until_sunday = 7 - ((dow_today + 7) % 7)\n #days_until_sunday = 7 - (dow_today + 1)\n sunday = startdate + datetime.timedelta(days=days_until_sunday)\n following_saturday = sunday + datetime.timedelta(days=6)\n next_week = (sunday, following_saturday)\n return next_week", "def workweeks(yr):\n\n # TODO: MOVE all of this crap into a intelDateTime.py module. Does not belong here. 
JSS\n\n nyd = datetime.date(yr, 1, 1).weekday() # Determine the day of the week on which the 1st of January fell this year.\n if nyd == 5: return 53 # If the 1st of January fell on a Saturday, the year has 53 weeks.\n if nyd == 4 and isleapyear(yr): return 53 # Same deal if the 1st of January fell on a Friday in a leap year.\n return 52 # All other years have 52 work weeks.", "def WEEKDAY(\n serial_number: func_xltypes.XlNumber,\n return_type: func_xltypes.XlNumber = None\n) -> func_xltypes.XlNumber:\n\n date = utils.number_to_datetime(int(serial_number))\n\n if return_type is None:\n # Numbers 1 (Sunday) through 7 (Saturday)\n weekDays = (2, 3, 4, 5, 6, 7, 1)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 1:\n # Numbers 1 (Sunday) through 7 (Saturday)\n weekDays = (2, 3, 4, 5, 6, 7, 1)\n return weekDays[date.weekday()]\n\n # weekday() is 0 based, starting on a Monday\n elif int(return_type) == 2:\n # Numbers 1 (Monday) through 7 (Sunday)\n weekDays = (1, 2, 3, 4, 5, 6, 7)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 3:\n # Numbers 0 (Monday) through 6 (Sunday)\n weekDays = (0, 1, 2, 3, 4, 5, 6)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 11:\n # Numbers 1 (Monday) through 7 (Sunday)\n weekDays = (1, 2, 3, 4, 5, 6, 7)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 12:\n # Numbers 1 (Tuesday) through 7 (Monday)\n weekDays = (7, 1, 2, 3, 4, 5, 6)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 13:\n # Numbers 1 (Wednesday) through 7 (Tuesday)\n weekDays = (6, 7, 1, 2, 3, 4, 5)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 14:\n # Numbers 1 (Thursday) through 7 (Wednesday)\n weekDays = (5, 6, 7, 1, 2, 3, 4)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 15:\n # Numbers 1 (Friday) through 7 (Thursday)\n weekDays = (4, 5, 6, 7, 1, 2, 3)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 16:\n # Numbers 1 (Saturday) through 7 (Friday)\n weekDays = (3, 4, 5, 6, 7, 1, 2)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 17:\n # Numbers 1 (Sunday) through 7 (Saturday)\n weekDays = (2, 3, 4, 5, 6, 7, 1)\n return weekDays[date.weekday()]\n\n else:\n raise xlerrors.NumExcelError(\n f\"return_type needs to be omitted or one of 1, 2, 3, 11, 12, 13,\\\n 14, 15, 16 or 17. 
You supplied {return_type}\")", "def _get_next_monday(self):\n today = datetime.date.today()\n weekday_int = today.weekday()\n if weekday_int == 0:\n return today\n next_mon = today + timedelta(7 - weekday_int)\n return next_mon", "def GetFirstSundayOfMonth(year, month):\n weeks = calendar.Calendar().monthdays2calendar(year, month)\n # Return the first day in the first week that is a Sunday.\n return [date_day[0] for date_day in weeks[0] if date_day[1] == 6][0]", "def get_weekday():\n result = datetime.today().weekday() + 1\n return result", "def _DayNumToWeekdayNum(daynum):\n return (daynum + _WEEKDAY_BASE) % NUM_WEEKDAYS", "def get_weekday(self):\n originDate = Date(1900, 1, 1)\n return WEEKDAYS[originDate.days_since(self) % 7]", "def get_week_of_year(date, padded_or_unpadded, start_Sunday_or_Monday):\n if start_Sunday_or_Monday == constants.str_Sunday:\n week_of_year = date.strftime('%U')\n elif start_Sunday_or_Monday == constants.str_Monday:\n week_of_year = date.strftime('%W')\n else:\n err_msg = str_possible_values('start_Sunday_or_Monday', [\n constants.str_Sunday, constants.str_Monday])\n raise ValueError(err_msg)\n\n if padded_or_unpadded == constants.str_padded:\n return week_of_year\n elif padded_or_unpadded == constants.str_unpadded:\n return str(int(week_of_year))\n else:\n err_msg = str_possible_values('padded_or_unpadded', [\n constants.str_padded, constants.str_unpadded])\n raise ValueError(err_msg)", "def get_the_weekday(self,date):\n date_convert = date.split('-')\n week_days = (\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\")\n date_list = [int(i) for i in date_convert]\n day = datetime.date(date_list[0], date_list[1], date_list[2])\n # convert weekday into digit (eg Mon -> 0,)\n num_day = day.weekday()\n day_as_string = week_days[num_day]\n return day_as_string", "def get_weekdays(date: str) -> list:\n parsed_date = parser.parse(date)\n day_of_week = parsed_date.weekday()\n first_day_of_week = parsed_date - datetime.timedelta(days=day_of_week)\n\n return holiday.create_date_range(first_day_of_week, 7)", "def test_monday(self):\n date = datetime.date(1981, 5, 4)\n self.assertEqual(date.isoweekday(), 1)\n start_date, end_date = get_weekspan(date)\n self.assertEqual(start_date.isoweekday(), 1)\n self.assertEqual(end_date.isoweekday(), 7)\n self.assertTrue(start_date.toordinal() <= date.toordinal() <= end_date.toordinal())", "def formatWeek(self, themonth, theweek, num_weeks):\n s = ''.join(self.formatDay(themonth, d, num_weeks) for d in theweek)\n return '<tr>%s</tr>' % s", "def get_date_in_two_weeks():\n today = datetime.datetime.today()\n date_in_two_weeks = today + datetime.timedelta(days=14)\n return date_in_two_weeks.date()", "def current_week(self):\n\n if not self.iso_equal() and self.time_stamp.weekday() == 6:\n return self.time_stamp_iso[1] + 2\n if not self.iso_equal() or self.time_stamp.weekday() == 6:\n return self.time_stamp_iso[1] + 1 \n return self.time_stamp_iso[1]", "def check_weekday_of_date(self, date):\n return date.isoweekday() % 7", "def get_day_of_week_string(date_string):\n\n # Split on / string, and feed to a datetime object, to use weekday function\n date_strings = date_string.split(\"/\")\n update_date = datetime.datetime(int(date_strings[2]), int(date_strings[1]), int(date_strings[0]))\n weekDays = (\"Mon\", \"Tue\", \"Wed\", \"Thur\", \"Fri\", \"Sat\", \"Sun\")\n day_of_week = str(weekDays[update_date.weekday()])\n return day_of_week", "def test_weeks(self):\n d = datetime(2014, 1, 29)\n 
eq_(week_start(d), datetime(2014, 1, 27, 0, 0, 0))\n eq_(week_end(d), datetime(2014, 2, 2, 23, 59, 59))", "def get_current_day_week_number(week_delta=0):\n return (datetime.today() + timedelta(weeks=week_delta)).isocalendar()[1]", "def dow(values, feature, parent): \r\n input_date = values[0]\r\n \r\n # Return dayOfWeek() % 7 so that values range from 0 (sun) to 6 (sat)\r\n # to match Postgresql behaviour\r\n if type(input_date) == QDateTime:\r\n return input_date.date().dayOfWeek() % 7\r\n elif type(input_date) == QDate:\r\n return input_date.dayOfWeek() % 7\r\n elif type(input_date) in (str, unicode): \r\n # Convert string to qdate\r\n input_qdate = QDate.fromString(input_date, 'yyyy-MM-dd')\r\n if input_qdate.isValid():\r\n return input_qdate.dayOfWeek() % 7 \r\n else:\r\n return None", "def dow(values, feature, parent): \r\n input_date = values[0]\r\n \r\n # Return dayOfWeek() % 7 so that values range from 0 (sun) to 6 (sat)\r\n # to match Postgresql behaviour\r\n if type(input_date) == QDateTime:\r\n return input_date.date().dayOfWeek() % 7\r\n elif type(input_date) == QDate:\r\n return input_date.dayOfWeek() % 7\r\n elif type(input_date) in (str, unicode): \r\n # Convert string to qdate\r\n input_qdate = QDate.fromString(input_date, 'yyyy-MM-dd')\r\n if input_qdate.isValid():\r\n return input_qdate.dayOfWeek() % 7 \r\n else:\r\n return None" ]
[ "0.7487706", "0.7056256", "0.70537466", "0.70099276", "0.70012397", "0.6820069", "0.6787127", "0.66652584", "0.6636667", "0.651747", "0.6508568", "0.644575", "0.6422966", "0.64131665", "0.6229527", "0.6217697", "0.62044543", "0.61213183", "0.60555804", "0.6036671", "0.6029071", "0.60265785", "0.60136586", "0.6007771", "0.5957888", "0.59162563", "0.58802545", "0.587622", "0.58497775", "0.5796988", "0.57785064", "0.5767196", "0.57330704", "0.57207453", "0.56769633", "0.56669176", "0.56077415", "0.5603615", "0.5584805", "0.55747443", "0.5565038", "0.55476975", "0.5521967", "0.54931575", "0.5490721", "0.5360772", "0.53543615", "0.53540415", "0.5352934", "0.5348617", "0.5329983", "0.5329249", "0.53237426", "0.53219616", "0.532178", "0.5314833", "0.531229", "0.531229", "0.5303133", "0.5303133", "0.529916", "0.52952474", "0.5294984", "0.5290728", "0.52878475", "0.5241577", "0.5216507", "0.52069634", "0.5205545", "0.51964587", "0.51932096", "0.5188195", "0.5178408", "0.51759404", "0.517013", "0.51687855", "0.51474416", "0.5144719", "0.51258004", "0.5124162", "0.5122752", "0.51202756", "0.509934", "0.50988144", "0.5092133", "0.50859004", "0.50788414", "0.507361", "0.50704086", "0.5067984", "0.50455415", "0.5017548", "0.50129634", "0.50049096", "0.50008965", "0.49954584", "0.49748206", "0.49518907", "0.4944139", "0.4944139" ]
0.65848774
9
Returns customized day of week as first day. Default first day of week is Monday; mday is used to set the first day of the week.
def day_of_week(dt):
    cday = dt
    mday = 2
    uday = cday.isocalendar()[2] + mday
    try:
        if uday > 7:
            CURRDAY = uday - 7
            log.debug("1;EME;RUNNING;000;Scheduler.py;Setting customized day of week>7 : ", CURRDAY)
        else:
            CURRDAY = uday
            log.debug("1;EME;RUNNING;000;Scheduler.py;Setting customized day of week : ", CURRDAY)
        return CURRDAY
    except Exception as e:
        log.exception("1;EME;FAILURE;700;SCHEDULE ERROR " + str(e), exc_info=False)
        sys.exit(0)
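A minimal sketch of the weekday-shifting idea used above (illustrative names, assuming a datetime.date input): take the ISO weekday (1 = Monday .. 7 = Sunday), add the mday offset, and wrap values above 7 back into the 1..7 range.

from datetime import date

def shifted_weekday(dt, mday=2):
    # isocalendar()[2] is 1 (Monday) .. 7 (Sunday); mday moves the week start.
    uday = dt.isocalendar()[2] + mday
    return uday - 7 if uday > 7 else uday

print(shifted_weekday(date(2023, 9, 15)))  # Friday: ISO weekday 5 + 2 -> 7
print(shifted_weekday(date(2023, 9, 17)))  # Sunday: ISO weekday 7 + 2 -> 9, wraps to 2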
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def first_day_of_week(self):\n return self.__first_day_of_week", "def locale_first_weekday():\n\tfirst_weekday = 6 #by default settle on monday\n\n\ttry:\n\t\tprocess = os.popen(\"locale first_weekday week-1stday\")\n\t\tweek_offset, week_start = process.read().split('\\n')[:2]\n\t\tprocess.close()\n\t\tweek_start = datetime.date(*time.strptime(week_start, \"%Y%m%d\")[:3])\n\t\tweek_offset = datetime.timedelta(int(week_offset) - 1)\n\t\tbeginning = week_start + week_offset\n\t\tfirst_weekday = int(beginning.strftime(\"%w\"))\n\texcept:\n\t\tprint \"WARNING - Failed to get first weekday from locale\"\n\n\treturn first_weekday", "def _FirstSunday(self, dtz): # pylint: disable-msg=C0103,R0201\n return dtz + datetime.timedelta(days=(6-dtz.weekday()))", "def first_week_day(self) -> int:\n return self._data['week_data']['first_day']", "def day_of_week_for_start_day(self):\n import calendar\n\n day = self.idfobjects[\"RUNPERIOD\"][0][\"Day_of_Week_for_Start_Day\"]\n\n if day.lower() == \"sunday\":\n return calendar.SUNDAY\n elif day.lower() == \"monday\":\n return calendar.MONDAY\n elif day.lower() == \"tuesday\":\n return calendar.TUESDAY\n elif day.lower() == \"wednesday\":\n return calendar.WEDNESDAY\n elif day.lower() == \"thursday\":\n return calendar.THURSDAY\n elif day.lower() == \"friday\":\n return calendar.FRIDAY\n elif day.lower() == \"saturday\":\n return calendar.SATURDAY\n else:\n return 0", "def day_of_week():\n return calendar.day_name[datetime.date.today().weekday()]", "def wkday_on_first(yr, mon): # returns day of week of first of month of the given year (1/1/2016)\r\n TotalDays = 0\r\n for x in range(1754, yr):\r\n YearNum = yeardays(x)\r\n TotalDays += YearNum\r\n for x in range(1, mon):\r\n MonNum = monthdays(yr, x)\r\n TotalDays += MonNum\r\n WhatDayNum = TotalDays % 7\r\n WhatDay = [\"Tues\", \"Wedn\", \"Thu\", \"Fri\", \"Sat\", \"Mon\"]\r\n return WhatDay[WhatDayNum]", "def week_start_on_monday(weekday):\n return (weekday - 1 + 6) % 7 + 1", "def set_week_day(self, wday):\r\n\t\twdays = ['Domingo', 'Lunes', 'Martes', 'Miercoles',\r\n\t\t\t\t 'Jueves', 'Viernes', 'Sabado']\r\n\t\tfor i in range(7):\r\n\t\t\tif wday == i: \r\n\t\t\t\treturn wdays[i]", "def get_day_of_week() -> str:\n return datetime.now(pytz.timezone('US/Eastern')).strftime(\"%a\").lower()", "def day_of_week(self) -> str:\n return self.elements[4]", "def get_weekday():\n result = datetime.today().weekday() + 1\n return result", "def day_of_week(self):\n day_of_week_names = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',\n 'Friday', 'Saturday', 'Sunday']\n diff = self.diff(Date(1, 1, 1970)) + 3\n while diff < 0:\n diff += 7\n print(day_of_week_names[diff % 7])", "def day_of_week(self) -> str:\n return pulumi.get(self, \"day_of_week\")", "def day_of_week(self) -> str:\n return pulumi.get(self, \"day_of_week\")", "def get_day_today() -> str:\n day = datetime.now().strftime(\"%w\")\n if day == '0': # Sunday\n return '6'\n elif day == '6': # Saturday\n return '5'\n elif day == '1': # Monday\n return '0'\n elif day == '2': # Tuesday\n return '1'\n elif day == '3': # Wednesday\n return '2'\n elif day == '4': # Thursday\n return '3'\n elif day == '5': # Friday\n return '4'", "def day_of_week(self):\n # 1 Jan 0001 was Monday according to the proleptic Gregorian calendar.\n # So, 1 Jan 0001 has ordinal 1, and the weekday is 0.\n return (self._ordinals - 1) % 7", "def weekday(self):\n return (self.toordinal() + 6) % 7", "def day_of_week(self) -> pulumi.Input[Union[str, 'WeekDay']]:\n return pulumi.get(self, 
\"day_of_week\")", "def day_of_week(self) -> pulumi.Input[Union[str, 'WeekDay']]:\n return pulumi.get(self, \"day_of_week\")", "def _get_next_monday(self):\n today = datetime.date.today()\n weekday_int = today.weekday()\n if weekday_int == 0:\n return today\n next_mon = today + timedelta(7 - weekday_int)\n return next_mon", "def weekday(day):\n return (day % 7) - 1", "def weekday(self):\n return 0", "def weekday(self):\n return 0", "def day_of_the_week(arg):", "def day_of_week(date: datetime) -> str:\n weekday = date.weekday()\n return calendar.day_name[weekday]", "def first_monday_of_week(year, week):\n weekyear = \"{} {} 1\".format(year, week)\n return time.asctime(time.strptime(weekyear, \"%Y %U %w\"))", "def get_first_day_of_the_week(self):\n if SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=1\n ).exists():\n return 1\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=2\n ).exists():\n return 2\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=3\n ).exists():\n return 3\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=4\n ).exists():\n return 4\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=5\n ).exists():\n return 5\n else:\n return 6", "def date_day_of_week(date):\n day_of_week = date.strftime('%A')\n return day_of_week", "def get_week_start(x: Optional[Date] = None) -> Date:\n asof = x or get_today()\n return asof - TimeDelta(days=(asof.isoweekday() - 1) % 7)", "def get_weekday(self):\n originDate = Date(1900, 1, 1)\n return WEEKDAYS[originDate.days_since(self) % 7]", "def get_weekday(self):\n weekdays = dict(PRODUCT_WEEKDAYS)\n return weekdays.get(self.weekday, \"N/A\")", "def get_next_monday(date):\n return date + datetime.timedelta(days=-date.weekday(), weeks=1)", "def day(self) -> Optional[pulumi.Input[Union[str, 'WeekDay']]]:\n return pulumi.get(self, \"day\")", "def next_sunday(day):\n if day.weekday() == 6: # sunday\n return day + timedelta(days=7)\n else:\n return day + timedelta(days=(6 - day.weekday()))", "def weekday(self):\n if self.month is not None and self.day is not None:\n return self.todate().weekday()\n else:\n return None", "def dow_1(self):\n return self._dayoffset + 1", "def next_seven_day(self):\n today = datetime.date.today()\n week_next = today + datetime.timedelta(days=7)\n return week_next.strftime('%Y-%m-%d')", "def weekday(self):\n\n return func.extract('dow', self.start_date) + 1", "def weekday(self) -> int:\n return WD_EN.index(self.time.day.lower())", "def first_month_day():\r\n return datetime.now().replace(day=1).strftime('%d-%m-%Y')", "def weekdayname(self, date):\n weekday = weekdayname_msgid(date.dow())\n return translate(weekday, domain='plonelocales',\n context=self.request, default=weekday)", "def start_day_of_weekend(self):\n return self._start_day_of_weekend", "def day_name(self):\n ref = Date(11, 11, 2019)\n day_names = ['Monday', 'Tuesday', 'Wednesday', \n 'Thursday', 'Friday', 'Saturday', 'Sunday']\n days = self.days_between(ref)\n self_day = day_names[days%7]\n return self_day", "def day_name(x):\r\n if x==0:\r\n return \"Sunday\"\r\n elif x==1:\r\n return \"Monday\"\r\n elif x==2:\r\n return \"Tuesday\"\r\n elif x==3:\r\n return \"Wednesday\"\r\n elif x==4:\r\n return \"Thursday\"\r\n elif x==5:\r\n return \"Friday\"\r\n elif x==6:\r\n return \"Saturday\"", "def main():\n print(day_of_week(datetime.now()))\n print(day_of_week(datetime(2019, 7, 4)))\n print(day_of_week(datetime(2013, 
12, 25)))\n print(day_of_week(datetime(2000, 1, 1)))", "def get_week_date():\n return timezone.now()+timezone.timedelta(days=6)", "def date_week_of_year(date, *, sunday_is_first_day_of_week: bool = False):\n if sunday_is_first_day_of_week:\n return date.strftime(\"%U\")\n else:\n return date.strftime(\"%V\")", "def first_day_of_month():\n first_object = datetime.utcnow()\n first_string = first_object.strftime('%m/01/%Y')\n return first_string", "def get_the_weekday(self,date):\n date_convert = date.split('-')\n week_days = (\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\")\n date_list = [int(i) for i in date_convert]\n day = datetime.date(date_list[0], date_list[1], date_list[2])\n # convert weekday into digit (eg Mon -> 0,)\n num_day = day.weekday()\n day_as_string = week_days[num_day]\n return day_as_string", "def Day_of_week(day, month, year):\r\n if year % 4 == 0 and (year % 400 == 0 or year % 100 != 0):\r\n doomsday = [11, 29, 21, 4, 9, 6, 11, 8, 5, 10, 7, 12]\r\n else:\r\n doomsday = [10, 28, 21, 4, 9, 6, 11, 8, 5, 10, 7, 12]\r\n exact_day = ((day - doomsday[month-1]) + Dooms_day(year)) % 7\r\n character_day = [\"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \r\n \"Friday\", \"Saturday\"]\r\n return character_day[exact_day]", "def get_next_weekend():\n d = datetime.date.today()\n # day 5 for saturday\n t = datetime.timedelta((7 + 5 - d.weekday()) % 7)\n return (d + t).strftime('%d-%m-%Y')", "def weekday(self, dt):\n days = {\n 0: self.MONDAY,\n 1: self.TUESDAY,\n 2: self.WEDNESDAY,\n 3: self.THURSDAY,\n 4: self.FRIDAY,\n 5: self.SATURDAY,\n 6: self.SUNDAY\n }\n return days.get(dt.weekday())", "def dow(self):\n comparator = Date(11, 12, 2014) # known to be a 'Wednesday'\n DOW = ['Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday', 'Monday', 'Tuesday']\n diff = self.diff(comparator)\n return DOW[diff % 7]", "def isoweekday(self):\n # 1-Jan-0001 is a Monday\n return self.toordinal() % 7 or 7", "def date_to_dow(y, m, d):\r\n # Python uses Monday week start, so wrap around\r\n w = calendar.weekday(y, m, d) + 1\r\n if w == 7:\r\n w = 0\r\n return w", "def distributeWeekday(self, jan1):\n self.firstday = jan1\n for day in self.daylist:\n if jan1%7 == 6 or jan1%7 == 0:\n day.weekday = 'weekend'\n jan1 = jan1 + 1", "def date_to_day_of_week(date):\n return date.weekday()", "def get_weekday_number(date):\n return date.strftime('%w')", "def GetFirstSundayOfMonth(year, month):\n weeks = calendar.Calendar().monthdays2calendar(year, month)\n # Return the first day in the first week that is a Sunday.\n return [date_day[0] for date_day in weeks[0] if date_day[1] == 6][0]", "def next_weekday(weekday, d=datetime.datetime.now()):\n if weekday.lower() not in day_values:\n return None\n days_ahead = day_values[weekday.lower()] - d.weekday()\n if days_ahead <= 0: # Target day already happened this week\n days_ahead += 7\n return d + datetime.timedelta(days_ahead)", "def assign_numeric_day(x):\n\n if x == 'Sunday':\n return 0\n elif x == 'Monday':\n return 1\n elif x == 'Tuesday':\n return 2\n elif x == 'Wednesday':\n return 3\n elif x == 'Thursday':\n return 4\n elif x == 'Friday':\n return 5\n elif x == 'Saturday':\n return 6", "def get_weekday():\n try:\n day = config.getint(\"threadbot\", \"debug_day\")\n except ConfigParser.NoOptionError:\n d = datetime.date.today()\n day = d.weekday()\n sort_by_new = False\n\n # 0 / Monday / Feedback thread\n # 1 / Tuesday / How do I make this sound thread\n # 2 / Wednesday / There are no stupid questions 
thread\n # 3 / Thursday / Marketplace thread\n dayname = \"waffles\"\n if day == 0:\n dayname = \"monday\"\n sort_by_new = True\n elif day == 1:\n dayname = \"tuesday\"\n sort_by_new = True\n elif day == 2:\n dayname = \"wednesday\"\n sort_by_new = True\n elif day == 3:\n dayname = \"thursday\"\n sort_by_new = False\n else:\n sys.exit(1) # woo inelegance\n\n return dayname, sort_by_new", "def meetup_day(year, month, day_of_week, day_occurrence):\n \n cal = calendar.monthcalendar(year, month)\n day_of_week_index = days_of_week[day_of_week]\n \n not_teenth = day_occurrence != 'teenth'\n day_is_in_first_week = cal[0][day_of_week_index] != 0\n \n if not_teenth and day_is_in_first_week:\n week_index = week_indices[day_occurrence]\n \n elif not_teenth and not day_is_in_first_week:\n week_index = week_indices[day_occurrence] + 1\n \n else:\n for i in range(len(cal)):\n if cal[i][day_of_week_index] >= 10:\n week_index = i\n break\n\n date = cal[week_index][day_of_week_index]\n return datetime.date(year, month, date)", "def day_of_week(day, month, year):\n bias = (14 - month) // 12\n m_year = year - bias\n mth = month + 12 * bias - 2\n return (day + m_year + m_year // 4 - m_year // 100 + m_year // 400 + (31 * mth) // 12) % 7", "def next_day_of_week(current, day_of_week):\n\n while current.weekday() != day_of_week:\n current += timedelta(1)\n return current", "def getDay(self):\n return _libsbml.Date_getDay(self)", "def _get_earliest_monday(self):\n try:\n earliest_request_date = self.org_admin.get_hours_requested().\\\n order_by('usertimelog__datetime_start').first().\\\n usertimelog.datetime_start\n except:\n earliest_request_date = self.org_admin.get_hours_approved().\\\n order_by('usertimelog__datetime_start').first().\\\n usertimelog.datetime_start\n\n earliest_monday = earliest_request_date - timedelta(\n days=(earliest_request_date.weekday()))\n earliest_monday = earliest_monday.replace(hour=00, minute=00, second=00)\n\n return earliest_monday", "def weekday_name(day_of_week):\n\n weekday_names = [\n 'Sunday', \n 'Monday', \n 'Tuesday', \n 'Wednesday', \n 'Thursday', \n 'Friday', \n 'Saturday']\n \n if day_of_week < 1 or day_of_week > 7:\n return 'None! 
Sowwy.'\n\n if day_of_week == 1:\n print(weekday_names[0])\n if day_of_week == 2:\n print(weekday_names[1])\n if day_of_week == 3:\n print(weekday_names[2])\n if day_of_week == 4:\n print(weekday_names[3])\n if day_of_week == 5:\n print(weekday_names[4])\n if day_of_week == 6:\n print(weekday_names[5]) \n if day_of_week == 7:\n print(weekday_names[6])", "def get_next_weekday(date, weekday):\n return date + dt.timedelta(days=(weekday - date.weekday() + 7) % 7)", "def get_trip_day_weekday(self):\n\n days = {\"Lundi\":'1', \"Mardi\":'2', \"Mercredi\":'3', \"Jeudi\":'4', \"Vendredi\":'5', \"Samedi\":'6', \"Dimanche\":'7'}\n\n return int(days[dict(self.TRIP_DAY_SELECTIONS)[self.trip_day]])", "def __str__(self):\n return self.day_of_week", "def format_weekday(time):\n return time.strftime(\"%A\").lower()", "def week(start_day=\"monday\"):\r\n today = datetime.datetime.combine(datetime.date.today(), datetime.datetime.min.time())\r\n weekday = today.weekday()\r\n week = list(calendar.Calendar(getattr(calendar, start_day.upper())).iterweekdays())\r\n pos = week.index(weekday)\r\n week_stamp = (to_stamp(today - datetime.timedelta(days=pos - i)) for i, d in enumerate(week))\r\n return collections.OrderedDict(((calendar.day_name[a], (b, b + DAY)) for a, b in zip(week, week_stamp)))", "def WEEKDAY(\n serial_number: func_xltypes.XlNumber,\n return_type: func_xltypes.XlNumber = None\n) -> func_xltypes.XlNumber:\n\n date = utils.number_to_datetime(int(serial_number))\n\n if return_type is None:\n # Numbers 1 (Sunday) through 7 (Saturday)\n weekDays = (2, 3, 4, 5, 6, 7, 1)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 1:\n # Numbers 1 (Sunday) through 7 (Saturday)\n weekDays = (2, 3, 4, 5, 6, 7, 1)\n return weekDays[date.weekday()]\n\n # weekday() is 0 based, starting on a Monday\n elif int(return_type) == 2:\n # Numbers 1 (Monday) through 7 (Sunday)\n weekDays = (1, 2, 3, 4, 5, 6, 7)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 3:\n # Numbers 0 (Monday) through 6 (Sunday)\n weekDays = (0, 1, 2, 3, 4, 5, 6)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 11:\n # Numbers 1 (Monday) through 7 (Sunday)\n weekDays = (1, 2, 3, 4, 5, 6, 7)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 12:\n # Numbers 1 (Tuesday) through 7 (Monday)\n weekDays = (7, 1, 2, 3, 4, 5, 6)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 13:\n # Numbers 1 (Wednesday) through 7 (Tuesday)\n weekDays = (6, 7, 1, 2, 3, 4, 5)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 14:\n # Numbers 1 (Thursday) through 7 (Wednesday)\n weekDays = (5, 6, 7, 1, 2, 3, 4)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 15:\n # Numbers 1 (Friday) through 7 (Thursday)\n weekDays = (4, 5, 6, 7, 1, 2, 3)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 16:\n # Numbers 1 (Saturday) through 7 (Friday)\n weekDays = (3, 4, 5, 6, 7, 1, 2)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 17:\n # Numbers 1 (Sunday) through 7 (Saturday)\n weekDays = (2, 3, 4, 5, 6, 7, 1)\n return weekDays[date.weekday()]\n\n else:\n raise xlerrors.NumExcelError(\n f\"return_type needs to be omitted or one of 1, 2, 3, 11, 12, 13,\\\n 14, 15, 16 or 17. 
You supplied {return_type}\")", "def week(self):\n J = self.JulianDay()\n d4 = (J + 31741 - (J % 7)) % 146097 % 36524 % 1461\n L = d4 // 1460\n d1 = ((d4 - L) % 365) + L\n return d1 // 7 + 1", "def dayname(self):\n return self.strftime(\"%A\")", "def get_this_week_label(self):\n return gettext_lazy('This week')", "def get_today_week_number(self):\n\n today = date.today()\n iso_result = today.isocalendar()\n return iso_result[1]", "def WEEKDAY(date, return_type=1):\n if return_type not in _weekday_type_map:\n raise ValueError(\"Invalid return type %s\" % (return_type,))\n (first, index) = _weekday_type_map[return_type]\n return (_make_datetime(date).weekday() - first) % 7 + index", "def find_date(startdate, weekday, weeknumber):\n import datetime\n # The +1 makes this match up with linux times (day 1 = Monday)\n daysahead = weekday - (startdate.weekday() + 1)\n if daysahead < 0:\n # Target day already happened this week\n daysahead += 7\n # Add 7 days for each Week Of Month we want - but 'This' week is week 1\n daysahead += 7 * (weeknumber - 1)\n return startdate + datetime.timedelta(daysahead)", "def week(self):\n if self._week.lower() == 'wild card':\n return WILD_CARD\n if self._week.lower() == 'division':\n return DIVISION\n if self._week.lower() == 'conf. champ.':\n return CONF_CHAMPIONSHIP\n if self._week.lower() == 'superbowl':\n return SUPER_BOWL\n return self._week", "def weekly():", "def next_weekday(date, weekday):\n delta = weekday - date.weekday()\n if delta < 0:\n delta += 7\n return date + timedelta(days=int(delta))", "def formatWeekDay(self, day):\n return '<th class=\"day\">%s</th>' % day_abbr[day]", "def format_dow(value):\n if value:\n return [\n 'Sunday',\n 'Monday',\n 'Tuesday',\n 'Wednesday',\n 'Thursday',\n 'Friday',\n 'Saturday',\n ][value]\n else:\n return 'N/A'", "def day_num(x):\r\n if x==\"Sunday\":\r\n return 0\r\n elif x==\"Monday\":\r\n return 1\r\n elif x==\"Tuesday\":\r\n return 2\r\n elif x==\"Wednesday\":\r\n return 3\r\n elif x==\"Thursday\":\r\n return 4\r\n elif x==\"Friday\":\r\n return 5\r\n elif x==\"Saturday\":\r\n return 6", "def dayofweek(self) -> Index:\n warnings.warn(\n \"`dayofweek` will return int32 index instead of int 64 index in 4.0.0.\",\n FutureWarning,\n )\n return Index(self.to_series().dt.dayofweek)", "def dayofweek(day, month, year, formatresult=True):\n if formatresult is False:\n return calendar.weekday(year, month, day) + 1\n days = {\n 0: 'Monday',\n 1: \"Tuesday\",\n 2: \"Wednesday\",\n 3: \"Thursday\",\n 4: \"Friday\",\n 5: \"Saturday\",\n 6: \"Sunday\"\n }\n return days[calendar.weekday(year, month, day)]", "def weekday(self, *args, **kwargs): # real signature unknown\r\n pass", "def first_day_of_month(date):\n return date.replace(day=1)", "def first_day_of_month(date):\n return date.replace(day=1)", "def date_form(day):\r\n new_day = \"\"\r\n if day == \"Monday\":\r\n new_day = \"Poniedziałek\"\r\n elif day == \"Tuesday\":\r\n new_day = \"Wtorek\"\r\n elif day == \"Wednesday\":\r\n new_day = \"Środa\"\r\n elif day == \"Thursday\":\r\n new_day = \"Czwartek\"\r\n elif day == \"Friday\":\r\n new_day = \"Piątek\"\r\n elif day == \"Saturday\":\r\n new_day = \"Sobota\"\r\n elif day == \"Sunday\":\r\n new_day = \"Niedziela\"\r\n return new_day", "def week_init():\n week = input('Week to check: MM/DD/YYYY\\n')\n week = dtt.datetime.strptime(week,'%m/%d/%Y') #turns input to a datetime\n beforeday = input('Check days before date (Press enter to use today): MM/DD/YYYY\\n') or dtt.date.today()\n if (beforeday != dtt.date.today()):\n 
beforeday = dtt.datetime.strptime(beforeday,'%m/%d/%Y')\n return week, beforeday", "def day_07_b() -> int:\n return 0", "def getOneDay(self,day_number=0):\n return self.event_time_sequence[day_number]", "def DayOfWeek(year, month, day):\n num = year * 365\n num = num + year // 4 + 1\n num = num - (year // 100 + 1)\n num = num + year // 400 + 1\n if month < 3 and LeapYear(year):\n num = num - 1\n return (num + MONTH_OFFSETS[month - 1] + day + 4) % 7 + 1", "def get_current_day_week_number(week_delta=0):\n return (datetime.today() + timedelta(weeks=week_delta)).isocalendar()[1]", "def day_07_a() -> int:\n return 0", "def days_of_the_week(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'DayOfWeek']]]]]:\n return pulumi.get(self, \"days_of_the_week\")" ]
[ "0.7771828", "0.7742936", "0.7577031", "0.75041586", "0.74610543", "0.74022716", "0.74017537", "0.7226098", "0.715162", "0.71447414", "0.7032424", "0.69580626", "0.68952614", "0.6892024", "0.6892024", "0.6819581", "0.6817187", "0.6786361", "0.67820686", "0.67820686", "0.67739815", "0.6772486", "0.6748926", "0.6748926", "0.6729732", "0.67196107", "0.6719052", "0.6707428", "0.669348", "0.6691228", "0.6655274", "0.66373277", "0.6631656", "0.6577313", "0.65757084", "0.6570707", "0.65643585", "0.6551987", "0.65515757", "0.65436834", "0.6510603", "0.6483161", "0.6477809", "0.64646626", "0.6453292", "0.6426419", "0.63563794", "0.63525856", "0.63353413", "0.63317156", "0.63024795", "0.63016605", "0.62984246", "0.6288423", "0.6285493", "0.6273741", "0.62495583", "0.6221345", "0.62168336", "0.6209232", "0.6201185", "0.61978763", "0.6197852", "0.6195678", "0.61954254", "0.61921555", "0.61831224", "0.6169908", "0.6139091", "0.6132008", "0.61015975", "0.6074397", "0.60736555", "0.60684794", "0.60671055", "0.604498", "0.6007471", "0.5978471", "0.595789", "0.5954382", "0.5939457", "0.5935921", "0.5928186", "0.5927785", "0.59254557", "0.59235096", "0.5922619", "0.59103566", "0.58998567", "0.58700246", "0.5854158", "0.5854158", "0.58516204", "0.5850265", "0.58484024", "0.58455324", "0.5842351", "0.58400804", "0.5831603", "0.581228" ]
0.6687542
30
Returns the q'th percentile of the distribution given in the argument 'data'. Uses the 'precision' parameter to control the noise level.
def Quantile(data, q, precision=1.0):
    N, bins = np.histogram(data, bins=precision*np.sqrt(len(data)))
    norm_cumul = 1.0*N.cumsum() / len(data)
    for i in range(0, len(norm_cumul)):
        if norm_cumul[i] > q:
            return bins[i]
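A short sketch of the same histogram-CDF quantile technique with NumPy (the variable names and the comparison call are illustrative assumptions): normalise the cumulative bin counts into an empirical CDF and return the left edge of the first bin whose cumulative fraction exceeds q.

import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(size=10_000)

counts, bins = np.histogram(data, bins=int(np.sqrt(len(data))))
cdf = counts.cumsum() / len(data)  # empirical CDF evaluated bin by bin
q = 0.5
estimate = bins[np.searchsorted(cdf, q, side='right')]  # left edge of the first bin whose CDF value exceeds q

print(estimate)              # coarse, bin-resolution estimate of the median
print(np.quantile(data, q))  # exact sample quantile, for comparison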
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Quartiles(data):\n q = np.percentile(data, [25, 50, 75])\n\n return q[0], q[1], q[2]", "def test__quantile(self):\r\n # regular cases\r\n sample_data = array(range(25, 42))\r\n assert_almost_equal(_quantile(sample_data, 0.5), median(sample_data))\r\n\r\n # sorted data is assumed for this function\r\n sample_data = sorted(\r\n array([0.17483293, 0.99891939, 0.81377467, 0.8137437,\r\n 0.51990174, 0.35521497, 0.98751461]))\r\n assert_almost_equal(_quantile(sample_data, 0.10), 0.283062154)", "def prctile(data, p_vals=[0, 25, 50, 75, 100], sorted_=False):\r\n ps = [p_vals] if np.isscalar(p_vals) else p_vals\r\n\r\n if not sorted_:\r\n data = sorted(data)\r\n n = len(data)\r\n d = []\r\n for p in ps:\r\n fi = p * n / 100 - 0.5\r\n if fi <= 0: # maybe extrapolate?\r\n d.append(data[0])\r\n elif fi >= n - 1:\r\n d.append(data[-1])\r\n else:\r\n i = int(fi)\r\n d.append((i+1 - fi) * data[i] + (fi - i) * data[i+1])\r\n return d[0] if np.isscalar(p_vals) else d", "def get_percentile(self, q):\n return None", "def get_percentile(self, q):\n return None", "def quantile(x, p):\n sorted_x = sorted(x)\n # round p_index to base int\n p_index = int(p * len(x))\n return sorted_x[p_index]", "def calcrange_quartile(data, log=False):\n if not isinstance(data, numpy.ndarray):\n data = numpy.array(data)\n if log:\n data = data[data > 0.]\n\n if len(data) == 0:\n if log: return 0.1, 1.\n else: return 0., 1.\n\n data = numpy.sort(data)\n q1 = data[int(math.floor(0.25*len(data)))]\n q3 = data[int(math.floor(0.75*len(data)))]\n if log:\n return q1 / (q3 - q1), q3 * (q3 - q1)\n else:\n return q1 - (q3 - q1), q3 + (q3 - q1)", "def quantile(x,p):\n p_index = int(p*len(x))\n return sorted(x)[p_index]", "def quantile(x, p):\n p_index = int(p * len(x))\n return sorted(x)[p_index]", "def quantile(x, p):\n p_index = int(p * len(x))\n return sorted(x)[p_index]", "def quantile(x, p):\n p_index = int(p * len(x))\n return sorted(x)[p_index]", "def IQR(data):\n return percentile(data, 75) - percentile(data, 25)", "def quantile(xs: List[float], p: float) -> float:\n p_index = int(p * len(xs))\n return sorted(xs)[p_index]", "def default_quantile():\n return np.logspace(-5, 0, 100)", "def get_percentile(self, q):\n return self.sum_stat_sample_ratio.get_percentile(q)", "def percentile(t: torch.tensor, q: float):\n # Note that ``kthvalue()`` works one-based, i.e. the first sorted value\n # indeed corresponds to k=1, not k=0! Use float(q) instead of q directly,\n # so that ``round()`` returns an integer, even if q is a np.float32.\n k = 1 + round(.01 * float(q) * (t.numel() - 1))\n result = t.view(-1).kthvalue(k).values.item()\n return result", "def get_percentile(self, q):\n return self.sum_stat_sample_delta.get_percentile(q)", "def percentile(field, q):\n # https://gist.github.com/spezold/42a451682422beb42bc43ad0c0967a30\n # Note that ``kthvalue()`` works one-based, i.e. the first sorted value\n # indeed corresponds to k=1, not k=0! 
Use float(q) instead of q directly,\n # so that ``round()`` returns an integer, even if q is a np.float32.\n k = 1 + round(.01 * float(q) * (field.shape[1] - 1))\n result = field.kthvalue(k, dim=1).values\n return result", "def _quantile(data, quantile):\r\n index = quantile * (len(data) - 1)\r\n bottom_index = int(floor(index))\r\n top_index = int(ceil(index))\r\n\r\n difference = index - bottom_index\r\n output = (1 - difference) * \\\r\n data[bottom_index] + difference * data[top_index]\r\n\r\n return output", "def get_percentile(self, q):\n raise NotImplementedError(\"This is an abstract method and needs to be implemented in derived classes.\")", "def test_profiled_quantiles(self):\n\n # this data has 4 bins, range of 3\n # with equal bin size, each bin has the width of 0.75\n\n data = [\"1.0\", \"2.0\", \"3.0\", \"4.0\"]\n df = pd.Series(data)\n profiler = FloatColumn(df.name)\n profiler.update(df)\n profile = profiler.profile\n\n est_quartiles = profile['quantiles']\n est_Q1 = est_quartiles[249]\n est_Q2 = est_quartiles[499]\n est_Q3 = est_quartiles[749]\n\n data_to_num = [float(item) for item in data]\n exact_Q1 = np.percentile(data_to_num, 25)\n exact_Q2 = np.percentile(data_to_num, 50)\n exact_Q3 = np.percentile(data_to_num, 75)\n\n self.assertEqual(est_Q1, exact_Q1)\n self.assertEqual(est_Q2, exact_Q2)\n self.assertEqual(est_Q3, exact_Q3)", "def get_quartile_data(cls, data: tuple or list) -> tuple:\n cls._data_validation(data)\n # Sort the data\n sorted_data = sorted(list(data))\n # Get q2, which is the median\n q2 = cls.get_median(data)\n first_half_data = list()\n second_half_data = list()\n # add to first half until median, then add to second half\n for i in range(len(sorted_data)):\n # if less than q2, first half\n if sorted_data[i] < q2:\n first_half_data.append(sorted_data[i])\n # if greather than q2, second half, skips q2\n elif sorted_data[i] > q2:\n second_half_data.append(sorted_data[i])\n # use median method on halves to get quartiles\n q1 = cls.get_median(first_half_data)\n q3 = cls.get_median(second_half_data)\n iqr = q3-q1\n return q1, q2, q3, iqr", "def test_valid_calculation_of_quantile(alpha: Any) -> None:\n n = 30\n check_alpha_and_n_samples(alpha, n)", "def test_quantile(self):\r\n\r\n # suffle the data to be sure, it is getting sorted\r\n sample_data = array(range(1, 11))\r\n shuffle(sample_data)\r\n\r\n # regular cases\r\n expected_output = [1.9, 2.8, 3.25, 5.5, 7.75, 7.93]\r\n list_of_quantiles = [0.1, 0.2, 0.25, 0.5, 0.75, 0.77]\r\n output = quantile(sample_data, list_of_quantiles)\r\n assert_almost_equal(expected_output, output)\r\n\r\n sample_data = array([42, 32, 24, 57, 15, 34, 83, 24, 60, 67, 55, 17,\r\n 83, 17, 80, 65, 14, 34, 39, 53])\r\n list_of_quantiles = [0.5]\r\n output = quantile(sample_data, list_of_quantiles)\r\n assert_almost_equal(output, median(sample_data))\r\n\r\n # quantiles must be between [0, 1]\r\n with self.assertRaises(AssertionError):\r\n output = quantile(sample_data, [0.1, 0.2, -0.1, 2, 0.3, 0.5])\r\n\r\n # quantiles must be a list or a numpy array\r\n with self.assertRaises(AssertionError):\r\n output = quantile(sample_data, 1)\r\n\r\n # the data must be a list or a numpy array\r\n with self.assertRaises(AssertionError):\r\n output = quantile(1, [0])", "def _percentile(self, data, percent):\n if not data:\n return None\n k = (len(data) - 1) * percent\n f = math.floor(k)\n c = math.ceil(k)\n if f == c:\n return data[int(k)]\n d0 = data[int(f)] * (c - k)\n d1 = data[int(c)] * (k - f)\n return d0 + d1", "def quantile(a, prob):\n 
a=numpy.asanyarray(a)\n a=a[numpy.logical_not(numpy.isnan(a))].ravel()\n n=a.size\n\n if prob>=1-.5/n:\n return a.max()\n elif prob<=.5/n:\n return a.min()\n\n # find the two bounds we're interpreting between:\n # that is, find i such that (i+.5) / n <= prob <= (i+1.5)/n\n t=n*prob-.5\n i=int(t)\n\n # partial sort so that the ith element is at position i, with bigger ones\n # to the right and smaller to the left\n a.sort()\n\n if i==t: # did we luck out and get an integer index?\n return a[i]\n else:\n # we'll linearly interpolate between this and the next index\n smaller=a[i]\n larger=a[i+1:].min()\n if numpy.isinf(smaller):\n return smaller # avoid inf - inf\n return smaller+(larger-smaller)*(t-i)", "def get_quartile_data(cls, data: tuple or list) -> tuple:\n cls._data_validation(data)\n from math import floor\n # Sort the data\n n = cls.get_n(data)\n if n == 0:\n # Empty dataset, returns zeroes\n return 0, 0, 0, 0\n sorted_data = sorted(list(data))\n n_is_odd = True if n % 2 == 1 else False\n\n # Get middle index\n odd_middle_index = floor(n / 2)\n even_upper_index = floor(n / 2)\n even_lower_index = floor(n / 2) - 1\n\n # Get q2, which is the median\n q2 = cls.get_median(data)\n first_half_data = list()\n second_half_data = list()\n\n # add to first half until median, then add to second half\n if n_is_odd:\n for i in range(n):\n if i < odd_middle_index:\n first_half_data.append(sorted_data[i])\n # note how if index = middle_index, skips\n elif i > odd_middle_index:\n second_half_data.append(sorted_data[i])\n else:\n for i in range(n):\n if i <= even_lower_index:\n first_half_data.append(sorted_data[i])\n # note how if index = middle_index, skips\n else:\n second_half_data.append(sorted_data[i])\n # use median method on halves to get quartiles\n q1 = cls.get_median(first_half_data)\n q3 = cls.get_median(second_half_data)\n iqr = q3 - q1\n return q1, q2, q3, iqr", "def quartiles(x, percentile):\n length = len(x)\n\n if percentile == 25:\n center = length // 4\n elif percentile == 75:\n center = length // 2 + length // 4\n\n x.sort()\n\n if length % 2 == 0:\n return (x[center - 1] + x[center]) / 2\n else:\n return x[center]", "def get_percentile(before, level):\n snr = np.array(before.getColumnByName('snr')[:])\n return np.percentile(snr, level)", "def quantile_func(q):\n def f(x):\n return np.quantile(x, q)\n\n return f", "def percentile_filter(data, feats):\n\n # Determines the fraction of nonzero values in the data.\n sparsity_frac = feats['nonzeros'] / (feats['nrows'] * feats['ncols'])\n\n # NOTE: Added convenience term derived from experience.\n thresh = np.percentile(data.ravel(), q=100 * (1 - (sparsity_frac + 0.1)))\n\n # Replace p-values below threshold with zero.\n data[(data > 0) & (data < thresh)] = 0\n\n return data", "def torch_percentile(t, q):\n # Note that ``kthvalue()`` works one-based, i.e. the first sorted value\n # indeed corresponds to k=1, not k=0! 
Use float(q) instead of q directly,\n # so that ``round()`` returns an integer, even if q is a np.float32.\n k = 1 + round(.01 * float(q) * (t.numel() - 1))\n result = t.view(-1).kthvalue(k).values.item()\n return result", "def percentile(p):\n assert 1.0 > p >= 0.0\n\n def __p1(ls):\n k = math.floor(len(ls) * p)\n assert k >= 0\n assert k < len(ls)\n\n start = 0\n end = len(ls) - 1\n\n while start <= end:\n pivot_index = start\n pivot_val = ls[pivot_index]\n\n for i in range(pivot_index + 1, end + 1):\n if ls[i] >= pivot_val:\n continue\n else:\n ls[pivot_index] = ls[i]\n pivot_index += 1\n ls[i] = ls[pivot_index]\n\n ls[pivot_index] = pivot_val\n if pivot_index == k:\n return pivot_val\n elif pivot_index > k:\n end = pivot_index - 1\n else:\n start = pivot_index + 1\n\n return ls[start]\n\n return __p1", "def median(data, weights):\n return quantile(data, weights, 0.5)", "def test_lots_of_percentiles(self):\n data = np.array(\n [\n [[14.4, -46, 10.2], [-73.0, -46, -89.2], [-89.2, -93.25, -94.6]],\n [[19.2, 8.25, 10.6], [-19, 8.25, -67.6], [-67.6, -79.75, -83.8]],\n [[24.0, 8.75, 11.0], [8.33333, 8.75, -46.0], [-46.0, -66.25, -73.0]],\n [[28.8, 9.25, 11.4], [9.0, 9.25, -24.4], [-24.4, -52.75, -62.2]],\n [[33.6, 9.75, 11.8], [9.666667, 9.75, -2.8], [-2.8, -39.25, -51.4]],\n [\n [38.4, 10.333333, 16.8],\n [10.333333, 10.2, 8.5],\n [8.333333, -25.75, -40.6],\n ],\n [[43.2, 11.0, 26.4], [11.0, 10.6, 9.5], [9.0, -12.25, -29.8]],\n [\n [48.0, 11.666667, 36.0],\n [11.666667, 11.0, 10.5],\n [9.666667, 1.25, -19.0],\n ],\n [[52.8, 24, 45.6], [24, 11.4, 11.5], [10.5, 8.5, -8.2]],\n [[57.6, 48, 55.2], [48, 11.8, 36.0], [11.5, 9.5, 2.6]],\n ],\n dtype=np.float32,\n )\n\n percentiles = np.arange(5, 100, 10)\n result = Plugin()._probabilities_to_percentiles(self.cube, percentiles)\n self.assertArrayAlmostEqual(result.data, data, decimal=5)", "def calc_stats(data):\n x = np.asarray(data, np.float)\n vals_min = np.min(x)\n vals_max = np.max(x)\n q2 = np.percentile(x, 50, interpolation='linear')\n q1 = np.percentile(x, 25, interpolation='lower')\n q3 = np.percentile(x, 75, interpolation='higher')\n iqr = q3 - q1\n whisker_dist = 1.5 * iqr\n\n # in order to prevent drawing whiskers outside the interval\n # of data one defines the whisker positions as:\n d1 = np.min(x[x >= (q1 - whisker_dist)])\n d2 = np.max(x[x <= (q3 + whisker_dist)])\n return {\n 'min': vals_min,\n 'max': vals_max,\n 'q1': q1,\n 'q2': q2,\n 'q3': q3,\n 'd1': d1,\n 'd2': d2\n }", "def p(n):\n def p_(x):\n return np.percentile(x, n)\n\n p_.__name__ = \"p_%s\" % n\n return p_", "def quantile(agg, k=4, name='quantile'):\n\n w = 100.0 / k\n p = np.arange(w, 100 + w, w)\n\n if p[-1] > 100.0:\n p[-1] = 100.0\n\n q = np.array([stats.scoreatpercentile(agg.data, pct) for pct in p])\n q = np.unique(q)\n k_q = len(q)\n\n if k_q < k:\n print(\"Quantile Warning: Not enough unique values for k classes (using {} bins)\".format(k_q))\n\n return DataArray(_bin(agg.data, q, np.arange(k_q)),\n name=name,\n dims=agg.dims,\n coords=agg.coords,\n attrs=agg.attrs)", "def dp_quantile(data, alpha, candidates=None, lower=None, upper=None, mechanism=\"Automatic\", privacy_usage=None, interpolation=\"midpoint\", **kwargs):\n return Component(\n \"DPQuantile\",\n arguments={\n 'data': Component.of(data),\n 'candidates': Component.of(candidates),\n 'lower': Component.of(lower),\n 'upper': Component.of(upper)\n },\n options={\n 'alpha': alpha,\n 'mechanism': mechanism,\n 'privacy_usage': serialize_privacy_usage(privacy_usage),\n 'interpolation': interpolation\n },\n 
constraints=kwargs)", "def _get_percentile_cutoff(data, cutoff=None, percentile=None, required=False):\n if percentile is not None:\n if cutoff is not None:\n raise ValueError(\n \"Only one of `cutoff` and `percentile` should be given.\"\n \"Got cutoff={}, percentile={}\".format(cutoff, percentile)\n )\n if not isinstance(percentile, numbers.Number):\n return [_get_percentile_cutoff(data, percentile=p) for p in percentile]\n if percentile < 1:\n warnings.warn(\n \"`percentile` expects values between 0 and 100.\"\n \"Got {}. Did you mean {}?\".format(percentile, percentile * 100),\n UserWarning,\n )\n cutoff = np.percentile(np.array(data).reshape(-1), percentile)\n elif cutoff is None and required:\n raise ValueError(\"One of either `cutoff` or `percentile` must be given.\")\n return cutoff", "def kernel_quantile_heuristic(X, q=0.5):\n pairwise_sq_dists = pdist(X, 'sqeuclidean')\n quantile_heuristic_s2 = np.quantile(pairwise_sq_dists, q=q)\n return quantile_heuristic_s2", "def test_check_data_specifying_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube, percentiles=[25, 50, 75])\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def transfo_quantile(xx):\n\n # nn = np.zeros(len(xx))\n # oo = np.argsort(xx)\n # nn[oo] = np.arange(len(xx)) / len(xx) + 1 / (2 * len(xx))\n # return nn\n\n return rankdata(xx) / len(xx) - 1 / (2 * len(xx))", "def test_check_data_specifying_no_of_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube, no_of_percentiles=3)\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test_check_data_specifying_single_percentile(self):\n expected_data = np.array(self.percentile_25)\n result = Plugin().process(self.cube, percentiles=[25])\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def matlab_percentile(in_data, percentiles):\n data = np.sort(in_data)\n p_rank = 100.0 * (np.arange(data.size) + 0.5) / data.size\n perc = np.interp(percentiles, p_rank, data, left=data[0], right=data[-1])\n return perc", "def kempton_taylor_q(counts, lower_quantile=.25, upper_quantile=.75):\n n = len(counts)\n lower = int(ceil(n*lower_quantile))\n upper = int(n*upper_quantile)\n sorted = counts.copy()\n sorted.sort()\n return (upper-lower)/log(sorted[upper]/sorted[lower])", "def normalizeprctile(expdat,percent=80):\n\tparams=locals()\n\n\t# select the bacteria to use - don't want to include very low freq. 
bacteria\n\tnewexp=hs.filterminreads(expdat,1*len(expdat.samples))\n\n\tpercvals=np.percentile(newexp.data,percent,axis=0)\n#\tplt.figure()\n#\tplt.plot(percvals)\n\tpercvals=percvals/np.mean(percvals)\n\tnewexp=hs.copyexp(expdat)\n\tfor idx,samp in enumerate(expdat.samples):\n\t\tnewexp.data[:,idx]=newexp.data[:,idx]*percvals[idx]\n\tnewexp.filters.append(\"normalize percentile %f\" % percent)\n\ths.addcommand(newexp,\"normalizeprctile\",params=params,replaceparams={'expdat':expdat})\n\n\treturn newexp", "def trimean(data):\n p_25, p_50, p_75 = percentile(data, [25, 50, 75], axis=0)\n\n return (p_25 + 2 * p_50 + p_75) / 4", "def get_percentile(data_list, score, kind='weak'):\n n = len(data_list)\n\n if kind == 'strict':\n return len([i for i in data_list if i < score]) / float(n) * 100\n elif kind == 'weak':\n return len([i for i in data_list if i <= score]) / float(n) * 100\n elif kind == 'mean':\n return (len([i for i in data_list if i < score]) + len([i for i in data_list if i <= score])) * 50 / float(n)\n else:\n raise ValueError(\"The kind kwarg must be 'strict', 'weak' or 'mean'. You can also opt to leave it out and rely on the default method.\")", "def quantileValues(data, device):\n r = pd.DataFrame([])\n if isinstance(data, pd.DataFrame) or isinstance(data, pd.Series):\n minValue = data.min()\n maxValue = data.max()\n q1 = data.quantile(0.25)\n # q2 = tmp['deltaSeconds'].quantile(0.5)\n q3 = data.quantile(0.75)\n QR = q3 - q1\n upper = 1.5 * QR + q3\n lower = q1 - 1.5 * QR\n\n elif isinstance(data, np.ndarray):\n minValue = data.min()\n maxValue = data.max()\n q1 = np.percentile(data, 25)\n # q2 = np.percentile(data, 50)\n q3 = np.percentile(data, 75)\n QR = q3 - q1\n upper = 1.5 * QR + q3\n lower = q1 - 1.5 * QR\n r = (r.append(pd.DataFrame({'mac': device.mac, 'minValue': minValue,\n 'maxValue': maxValue, 'lower': lower,\n 'upper': upper, 'q1': q1, 'q3': q3},\n index=[0]), ignore_index=True))\n return r", "def percentile(scores, student_score):\n scores = np.array(sorted(scores))\n num_scores = len(scores)\n return round(sum(scores <= student_score) / float(num_scores) * 100, 2)", "def percentile(data, statfxn, niter=10000, alpha=0.05):\n\n index = _make_boot_index(data.shape[0], niter)\n boot_stats = statfxn(data[index], axis=-1)\n\n # compute the `alpha/2` and `1-alpha/2` percentiles of `boot_stats`\n CI = numpy.percentile(boot_stats, [alpha * 50, 100 - (alpha * 50)], axis=0)\n\n return CI", "def PrivateQuantile(Z, q, eps, xmin, xmax):\n\n N = len(Z)\n m = np.floor((N - 1) * q + 1.5)\n Z[Z < xmin] = xmin\n Z[Z > xmax] = xmax\n Z_dict = {i: Z[i - 1] for i in range(1, len(Z) + 1)}\n Z_dict[0] = np.min(Z) # see Du et al, I found Smith confusing\n Z_dict[len(Z) + 1] = np.max(Z) # see Du et al, I found Smith confusing\n\n ps = []\n prob_sum = 0\n\n for i in range(len(Z) + 1):\n # p_i = (Z_dict[i+1] - Z_dict[i]) * np.exp(eps * Utility(m, i)) #du\n p_i = (Z_dict[i + 1] - Z_dict[i]) * np.exp(-eps * np.abs(i - q * k)) # smith\n prob_sum += p_i\n ps.append(p_i)\n\n prob_vec = [p_i / prob_sum for p_i in ps]\n j = np.random.choice(a=list(range(len(Z) + 1)), size=1, p=prob_vec)[0]\n out = np.random.uniform(low=Z_dict[j], high=Z_dict[j + 1], size=1)\n\n return (out)", "def quantile(image, q, nonzero=True):\n img_arr = image.numpy()\n if isinstance(q, (list,tuple)):\n q = [qq*100. if qq <= 1. 
else qq for qq in q]\n if nonzero:\n img_arr = img_arr[img_arr>0]\n vals = [np.percentile(img_arr, qq) for qq in q]\n return tuple(vals)\n elif isinstance(q, (float,int)):\n if q <= 1.:\n q = q*100.\n if nonzero:\n img_arr = img_arr[img_arr>0]\n return np.percentile(img_arr[img_arr>0], q)\n else:\n raise ValueError('q argument must be list/tuple or float/int')", "def test_percentile_kurtosis():\n f = np.asarray([\n [0.99, 1.0, 0.5, 0.52],\n [0.69, 0.6, 0.61, 1.0]])\n R = common_metrics.percentile_kurtosis(f, maximise=True)\n expected = np.asarray(\n [1.06382979, 5.0])\n assert np.allclose(R, expected)", "def percentile(data, percentiles, weights=None):\n # check if actually weighted percentiles is needed\n if weights is None:\n return np.percentile(data, list(percentiles))\n if np.equal(weights, 1.).all():\n return np.percentile(data, list(percentiles))\n\n # make sure percentiles are fractions between 0 and 1\n if not np.greater_equal(percentiles, 0.0).all():\n raise ValueError(\"Percentiles less than 0\")\n if not np.less_equal(percentiles, 100.0).all():\n raise ValueError(\"Percentiles greater than 100\")\n\n # Make sure data is in correct shape\n shape = np.shape(data)\n n = len(data)\n if (len(shape) != 1):\n raise ValueError(\"wrong data shape, expecting 1d\")\n\n if len(weights) != n:\n print(n, len(weights))\n raise ValueError(\"weights must be the same shape as data\")\n if not np.greater_equal(weights, 0.0).all():\n raise ValueError(\"Not all weights are non-negative.\")\n\n _data = np.asarray(data, dtype=float)\n\n if hasattr(percentiles, '__iter__'):\n _p = np.asarray(percentiles, dtype=float) * 0.01\n else:\n _p = np.asarray([percentiles * 0.01], dtype=float)\n\n _wt = np.asarray(weights, dtype=float)\n\n len_p = len(_p)\n sd = np.empty(n, dtype=float)\n sw = np.empty(n, dtype=float)\n aw = np.empty(n, dtype=float)\n o = np.empty(len_p, dtype=float)\n\n i = np.argsort(_data)\n np.take(_data, i, axis=0, out=sd)\n np.take(_wt, i, axis=0, out=sw)\n np.add.accumulate(sw, out=aw)\n\n if not aw[-1] > 0:\n raise ValueError(\"Nonpositive weight sum\")\n\n w = (aw - 0.5 * sw) / aw[-1]\n\n spots = np.searchsorted(w, _p)\n for (pk, s, p) in zip(range(len_p), spots, _p):\n if s == 0:\n o[pk] = sd[0]\n elif s == n:\n o[pk] = sd[n - 1]\n else:\n f1 = (w[s] - p) / (w[s] - w[s - 1])\n f2 = (p - w[s - 1]) / (w[s] - w[s - 1])\n o[pk] = sd[s - 1] * f1 + sd[s] * f2\n return o", "def quartile(db: pd.DataFrame, col: str) -> pd.DataFrame:\n _, bins = pd.qcut(x=db[col],\n q=[0.25, 0.75], # [0.0, 0.25, 0.5, 0.75, 1.0],\n retbins=True, duplicates='drop')\n q1 = bins[0] # lower (first) quartile\n q3 = bins[1] # upper (third) quartile\n iqr = q3 - q1 # InterQuartile Range\n lower_fence = (q1 - 1.5 * iqr)\n upper_fence = (q3 + 1.5 * iqr)\n db.loc[db[col] < lower_fence, col] = q1\n db.loc[db[col] > upper_fence, col] = q3\n return db", "def quantiles(self, q: int):\n return self.dist.quantiles(q)", "def quantile(self, q: float) -> np.array:\n assert 0 <= q <= 1\n return np.array(\n list(\n chain.from_iterable(\n model.predict(self.featurized_data, q)\n for model in self.models\n )\n )\n )", "def quantile(df):\r\n\r\n\tdf_quantile_dict = dict()\r\n\r\n\tfor i, col in enumerate(df.columns):\r\n\t\tdf_quantile_dict[col] = [df[col].quantile(0.25), df[col].quantile(0.5), df[col].quantile(0.75)]\r\n\r\n\tdf_quantile = pd.DataFrame(df_quantile_dict, index=['Quantile (25%)', 'Quantile (50%)', 'Quantile (75%)'])\r\n\tpd.set_option('precision', 2) # set output display precision in 2 decimal places\r\n\r\n\treturn 
df_quantile", "def percentile(N, percent):\n N.sort()\n if not N:\n return None\n k = (len(N) - 1) * percent\n f = math.floor(k)\n c = math.ceil(k)\n if f == c:\n return N[int(k)]\n d0 = N[int(f)] * (c - k)\n d1 = N[int(c)] * (k - f)\n return d0 + d1", "def compute_percentile(value, cutoffs):\n\tif value < cutoffs[0]:\n\t\treturn 0.0\n\n\tfor i, cutoff in enumerate(cutoffs):\n\t\tif value < cutoff:\n\t\t\treturn math.floor(100 * (float(i)/(len(cutoffs))))\n\t\t\tbreak\n\treturn 100.0", "def calc_q1(data: list) -> float:\n py = copy.copy(data)\n py.sort()\n return py[round(len(py) / 4)]", "def quantile(self, hypercube):\n raise NotImplementedError()", "def quantile_corner(x, q, weights=None):\n if weights is None:\n return np.percentile(x, [100. * qi for qi in q])\n else:\n idx = np.argsort(x)\n xsorted = x[idx]\n cdf = np.add.accumulate(weights[idx])\n cdf /= cdf[-1]\n return np.interp(q, cdf, xsorted).tolist()", "def quantile(self, q, *, axis=0, **kwargs) -> \"Dataset\":\n return self._quantile(q, axis=axis, func=np.quantile, **kwargs)", "def test_check_data_not_specifying_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube)\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def quantile(data, weights, quantile_limit):\n data = np.asarray(data)\n # TO DO: Allow to specify the axis\n if data.ndim == 0:\n raise TypeError(\"data must have at least one dimension\")\n\n elif data.ndim == 1:\n return quantile_1d(data, weights, quantile_limit)\n\n # elif data.ndim > 1:\n shape = data.shape\n imr = data.reshape((np.prod(shape[:-1]), shape[-1]))\n result = np.apply_along_axis(quantile_1d, -1, imr, weights, quantile_limit)\n return result.reshape(shape[:-1])", "def quantile_1d(data, weights, quantile_limit):\n data = np.asarray(data)\n weights = np.asarray(weights)\n if data.ndim != 1:\n raise TypeError(\"data must be a one dimensional array\")\n\n if data.shape != weights.shape:\n raise TypeError(\"the length of data and weights must be the same\")\n\n if not 0.0 <= quantile_limit <= 1.0:\n raise ValueError(\"quantile must have a value between 0.0 and 1.0\")\n\n # Sort the data\n ind_sorted = np.argsort(data)\n sorted_data = data[ind_sorted]\n notnan = ~np.isnan(sorted_data)\n if np.count_nonzero(notnan) == 0:\n return np.nan\n\n sorted_weights = np.nan_to_num(weights[ind_sorted][notnan])\n\n # Compute the auxiliary arrays\n cuml_weights = np.cumsum(sorted_weights)\n\n # TO DO: Check that the weights do not sum zero\n prob_normalized = (cuml_weights - 0.5 * sorted_weights) / np.sum(sorted_weights)\n\n # Get the value of the weighted median\n return np.interp(quantile_limit, prob_normalized, sorted_data[notnan])", "def test_lots_of_probability_thresholds(self):\n data = np.array(\n [\n [[2.9, 2.9, 2.9], [2.9, 2.9, 2.9], [2.9, 2.9, 2.9]],\n [[14.5, 14.5, 14.5], [14.5, 14.5, 14.5], [14.5, 14.5, 14.5]],\n [\n [26.099998, 26.099998, 26.099998],\n [26.099998, 26.099998, 26.099998],\n [26.099998, 26.099998, 26.099998],\n ],\n ],\n dtype=np.float32,\n )\n\n input_probs = np.tile(np.linspace(1, 0, 30), (3, 3, 1)).T\n cube = set_up_probability_cube(\n input_probs.astype(np.float32),\n np.arange(30).astype(np.float32),\n threshold_units=\"degC\",\n )\n\n result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)\n\n self.assertArrayAlmostEqual(result.data, data)", "def quintile(self, ratio):\n\t\tif ratio<=float(1)/float(5):\n\t\t\treturn 1\n\t\telif ratio <= 
float(2)/float(5):\n\t\t\treturn 2\n\t\telif ratio <= float(3)/float(5):\n\t\t\treturn 3\n\t\telif ratio <= float(4)/float(5):\n\t\t\treturn 4\n\t\telse:\n\t\t\treturn 5", "def percentile(N, percent):\n if not N:\n return None\n k = (len(N)-1) * percent\n f = math.floor(k)\n c = math.ceil(k)\n if f == c:\n return N[int(k)]\n d0 = N[int(f)] * (c-k)\n d1 = N[int(c)] * (k-f)\n return d0+d1", "def createQuantile(data, column_name, cut_of_point):\r\n data[data[column_name] > data[column_name].quantile(cut_of_point)] = 0\r\n return data", "def calc_percentile(self, p):\n bounds = self.range_bins\n r = []\n v = []\n for b in bounds:\n d = self._get_data_distance(0., b)\n if len(d) < 1:\n continue\n r.append(b)\n v.append(np.percentile(d, p * 100.)) # percentile value\n\n r = np.asarray(r)\n np.asarray(v)\n\n o = {'r': np.asarray(r), 'value': np.asarray(v)}\n if 'percentiles' not in self.statistic.keys():\n self.statistic.update({'percentiles': {}})\n\n self.statistic['percentiles'].update({p: o})", "def axis_distribution_ratio(data, cutoff, upper_or_lower=\"upper\"):\n\n count = 0\n if upper_or_lower == \"upper\":\n for a in data:\n if a > cutoff:\n count += 1\n\n elif upper_or_lower == \"lower\":\n for a in data:\n if a < cutoff:\n count += 1\n\n ratio = count/len(data)\n\n return ratio", "def quantile(data, quantiles):\r\n\r\n assert isinstance(data, list) or isinstance(data, ndarray), \"Data must be either\" +\\\r\n \" a Python list or a NumPy 1-D array\"\r\n assert isinstance(quantiles, list) or isinstance(quantiles, ndarray), \"Quantiles\" +\\\r\n \" must be either a Python list or a NumPy 1-D array\"\r\n assert all(map(lambda x: x >= 0 and x <= 1, quantiles)), \"All the elements \" +\\\r\n \"in the quantiles list must be greater than 0 and lower than one\"\r\n\r\n # unless the user wanted, do not modify the data\r\n data = deepcopy(data)\r\n\r\n if not isinstance(data, ndarray):\r\n data = array(data)\r\n data.sort()\r\n\r\n output = []\r\n # if needed different quantile methods could be used\r\n for one_quantile in quantiles:\r\n output.append(_quantile(data, one_quantile))\r\n\r\n return output", "def _getPercentile(points, n, interpolate=False):\n sortedPoints = sorted([ p for p in points if p is not None])\n if len(sortedPoints) == 0:\n return None\n fractionalRank = (n/100.0) * (len(sortedPoints) + 1)\n rank = int(fractionalRank)\n rankFraction = fractionalRank - rank\n\n if not interpolate:\n rank += int(math.ceil(rankFraction))\n\n if rank == 0:\n percentile = sortedPoints[0]\n elif rank - 1 == len(sortedPoints):\n percentile = sortedPoints[-1]\n else:\n percentile = sortedPoints[rank - 1] # Adjust for 0-index\n\n if interpolate:\n if rank != len(sortedPoints): # if a next value exists\n nextValue = sortedPoints[rank]\n percentile = percentile + rankFraction * (nextValue - percentile)\n\n return percentile", "def compute_quantile(risk, T_max: int, scenario_numbers, quantile):\r\n\r\n print(\"\\tComputing Quantile...\")\r\n # Init quantile\r\n q = np.zeros(T_max)\r\n for t in range(T_max):\r\n risk[t].sort()\r\n q[t] = risk[t][int(np.ceil(scenario_numbers[t] * quantile)) - 1]\r\n print(\"\\tDone\")\r\n\r\n return q", "def get_percentile(obs, bootstrap):\n if np.isnan(obs):\n return np.nan\n else:\n return np.searchsorted(np.sort(bootstrap), obs) / len(bootstrap)", "def test_simple_check_data_above(self):\n expected = np.array([8.15384615, 9.38461538, 11.6])\n expected = expected[:, np.newaxis, np.newaxis]\n\n data = np.array([0.95, 0.3, 0.05])\n data = data[:, np.newaxis, 
np.newaxis]\n\n cube = set_up_probability_cube(\n data.astype(np.float32), ECC_TEMPERATURE_THRESHOLDS, threshold_units=\"degC\"\n )\n\n result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)\n self.assertArrayAlmostEqual(result.data, expected)", "def get_IQR(lst):\n return (float(np.percentile(lst, 75)) - float(np.percentile(lst, 25)))", "def calc_iqr(sig):\n # iqr = subtract(*percentile(sig, [75, 25]))\n return np.percentile(sig, 75) - np.percentile(sig, 25)", "def ferdosi(data_points):\n (N, _) = data_points.shape\n twentieth = np.percentile(data_points, 20, axis=0)\n eightieth = np.percentile(data_points, 80, axis=0)\n sigmas = (eightieth - twentieth) / math.log(N)\n return np.min(sigmas)", "def threshold_func(distribution: List[float], alpha: List[float], **kwargs) -> float:\n threshold = np.quantile(distribution, q=alpha, interpolation=\"lower\", **kwargs)\n return threshold", "def perc(data,percentile):\n clip_value = np.percentile(data,percentile)\n data = clip(data,clip_value)\n return data", "def nrmse_ipercentile(self, q1=25, q2=75) -> float:\n\n q1 = np.percentile(self.true, q1)\n q3 = np.percentile(self.true, q2)\n iqr = q3 - q1\n\n return float(self.rmse() / iqr)", "def iqr(self, arr):\n a = np.asarray(arr)\n self.q1 = stats.scoreatpercentile(a, 25)\n self.q2 = stats.scoreatpercentile(a, 50)\n self.q3 = stats.scoreatpercentile(a, 75)", "def basic_stats(data):\n if isinstance(data, pd.DataFrame):\n return data.describe(percentiles=[0.5]).T.drop(['50%'], axis=1)\n else:\n return data.to_frame().describe(percentiles=[0.5]).T.drop(['50%'], axis=1)", "def get_percentile_from_freq(\n freq: pd.Series,\n p: float = 0.5,\n ascending: bool = False,\n include_boundary: Union[bool, None] = None,\n) -> pd.Series:\n freq = freq.sort_values(ascending=ascending)\n cutoff_idx = (freq.cumsum() >= p * freq.sum()).idxmax()\n\n if include_boundary is None:\n if ascending:\n # If your looking at the smallest, if there is a large\n # jump in count, you will get way too much of the data\n # so probably *dont*\n include_boundary = False\n else:\n # If you're looking at the topx largest, you probably want\n # to *include* the count just larger than the sum (because)\n # this is probably a large proportion of the total count\n include_boundary = True\n if include_boundary:\n return freq.loc[:cutoff_idx]\n return freq.loc[:cutoff_idx].iloc[:-1]", "def test_create_quantile(self):\n quantile = pmp.utils.create_quantile(0.5, 4.0)\n self.assertIsInstance(quantile, pmp.Quantile)", "def percentile(data, qval, labels=None, index=None):\n data = np.asanyarray(data)\n\n def single_group(vals):\n return np.percentile(vals, qval)\n\n if labels is None:\n return single_group(data)\n\n # ensure input and labels match sizes\n data, labels = np.broadcast_arrays(data, labels)\n\n if index is None:\n mask = labels > 0\n return single_group(data[mask])\n\n if np.isscalar(index):\n mask = labels == index\n return single_group(data[mask])\n\n # remap labels to unique integers if necessary, or if the largest\n # label is larger than the number of values.\n if (\n not _safely_castable_to_int(labels.dtype)\n or labels.min() < 0\n or labels.max() > labels.size\n ):\n # remap labels, and indexes\n unique_labels, labels = np.unique(labels, return_inverse=True)\n idxs = np.searchsorted(unique_labels, index)\n\n # make all of idxs valid\n idxs[idxs >= unique_labels.size] = 0\n found = unique_labels[idxs] == index\n else:\n # labels are an integer type, and there aren't too many.\n idxs = np.asanyarray(index, 
int).copy()\n found = (idxs >= 0) & (idxs <= labels.max())\n\n idxs[~found] = labels.max() + 1\n\n # reorder data and labels, first by labels, then by data\n order = np.lexsort((data.ravel(), labels.ravel()))\n data = data.ravel()[order]\n labels = labels.ravel()[order]\n\n locs = np.arange(len(labels))\n lo = np.zeros(labels.max() + 2, int)\n lo[labels[::-1]] = locs[::-1]\n hi = np.zeros(labels.max() + 2, int)\n hi[labels] = locs\n lo = lo[idxs]\n hi = hi[idxs]\n # lo is an index to the lowest value in input for each label,\n # hi is an index to the largest value.\n\n # here starts the part that really diverts from scipy's median finder; the\n # linear interpolation method used corresponds to the default behaviour of\n # np.percentile().\n size = hi - lo + 1 # size of the group\n frac = (size - 1) * (qval / 100) # fractional index relative to lo\n hi = lo - np.int64(-frac // 1) # ceiled absolute index to data\n lo = lo + np.int64(frac // 1) # floored absolute index to data\n part = frac % 1 # fractional part of index\n return (data[lo] + part * (data[hi] - data[lo])).tolist()", "def quantile(data, alpha, candidates=None, interpolation=\"midpoint\", **kwargs):\n return Component(\n \"Quantile\",\n arguments={\n 'data': Component.of(data),\n 'candidates': Component.of(candidates)\n },\n options={\n 'alpha': alpha,\n 'interpolation': interpolation\n },\n constraints=kwargs)", "def calculate_cornish_fisher_percentile(alpha, mu, sigma, skew, kurt):\n\n z = stats.norm.ppf(alpha)\n he2 = np.polynomial.hermite_e.hermeval(z, [0.0, 0.0, 1.0])\n he3 = np.polynomial.hermite_e.hermeval(z, [0.0, 0.0, 0.0, 1.0])\n he13 = np.polynomial.hermite_e.hermeval(z, [0.0, -1.0, 0.0, -2.0])\n\n w = (z +\n he2 * skew / 6 +\n he3 * kurt / 24 +\n he13 * (skew ** 2) / 36)\n\n return mu + sigma * w", "def quantile(self, axis: str = 'rows', q: float = 0.5) -> 'DataFrame':\n if not utils.is_number(q):\n raise TypeError('`q` must be a number between 0 and 1')\n if q < 0 or q > 1:\n raise ValueError('`q` must be between 0 and 1')\n return self._stat_funcs('quantile', axis, q=q)", "def percentile(self, p=None):\n if p is None:\n raise ValueError(\"Please supply p\")\n\n if type(p) not in [int, float]:\n raise TypeError(\"p is a \" + str(type(p)) + \", not int or float\")\n\n if (p < 0) or (p > 100):\n raise ValueError(\"p: \" + str(p) + \" is not between 0 and 100!\")\n\n self.run()\n\n new_files = []\n new_commands = []\n for ff in self:\n target = temp_file(\"nc\")\n\n cdo_command = (\n \"cdo -timpctl,\"\n + str(p)\n + \" \"\n + ff\n + \" -timmin \"\n + ff\n + \" -timmax \"\n + ff\n + \" \"\n + target\n )\n\n cdo_command = tidy_command(cdo_command)\n target = run_cdo(cdo_command, target)\n new_files.append(target)\n new_commands.append(cdo_command)\n\n self.history += new_commands\n self._hold_history = copy.deepcopy(self.history)\n\n self.current = new_files\n\n cleanup()", "def quantiles(x, qlist=[2.5, 25, 50, 75, 97.5]):\n # Make a copy of trace\n x = x.copy()\n\n # For multivariate node\n if x.ndim > 1:\n # Transpose first, then sort, then transpose back\n sx = np.transpose(np.sort(np.transpose(x)))\n else:\n # Sort univariate node\n sx = np.sort(x)\n\n try:\n # Generate specified quantiles\n quants = [sx[int(len(sx) * q / 100.0)] for q in qlist]\n\n return dict(zip(qlist, quants))\n\n except IndexError:\n print(\"Too few elements for quantile calculation\")", "def performance_quantiles(data, performance_measure):\n quantiles = pd.qcut(x=data[performance_measure], q=4, labels=['q1', 'q2', 'q3', 'q4'])\n bins = 
quantiles.to_frame(name=performance_measure + '_quantiles')\n data_quantiles = pd.merge(data, bins, right_index=True, left_index=True)\n data_quantiles.dropna(inplace=True)\n data_quantiles.sort_values(performance_measure + '_quantiles', inplace=True)\n return data_quantiles", "def printProbability(p):\n if p > .0000001:\n return \"%9.6f%%\" % (p * 100)\n else:\n return \"%9.3e\" % (p)", "def quantile(series: Sequence, quantile: Union[None, float] = None):\n if quantile is None:\n try:\n last, series = series[-1], series[:-1]\n return np.mean(series < last)\n except IndexError:\n return np.nan\n assert 0 <= quantile <= 1, \"quantile must be within [0, 1]\"\n return np.nanpercentile(series, quantile * 100)" ]
[ "0.72270185", "0.7012418", "0.6803648", "0.67029893", "0.67029893", "0.669614", "0.6685391", "0.65556973", "0.65500534", "0.65500534", "0.65500534", "0.64803755", "0.64564687", "0.64286137", "0.64046985", "0.6381233", "0.6363074", "0.636056", "0.6354891", "0.63055617", "0.6262271", "0.62104005", "0.61781657", "0.61349875", "0.61099523", "0.60976803", "0.6073696", "0.6039872", "0.5991325", "0.598613", "0.59652305", "0.59274745", "0.5917163", "0.5900064", "0.58929324", "0.5889406", "0.5865979", "0.5853466", "0.5847741", "0.5846151", "0.583102", "0.5823016", "0.5818445", "0.5810179", "0.5804896", "0.5794636", "0.57916933", "0.5786017", "0.5782249", "0.5769046", "0.5763376", "0.5760332", "0.57261485", "0.5719482", "0.57137287", "0.5702018", "0.5699559", "0.5690979", "0.5666378", "0.56658685", "0.56628805", "0.56587875", "0.5655953", "0.56516045", "0.56288034", "0.55750257", "0.55749226", "0.5543568", "0.55390424", "0.5536854", "0.5533037", "0.5526286", "0.55258083", "0.55214494", "0.55123323", "0.5504404", "0.55030763", "0.54941225", "0.54913145", "0.54886734", "0.54862607", "0.5483289", "0.5471594", "0.5470774", "0.546778", "0.54423887", "0.5440931", "0.5429077", "0.5422745", "0.54194427", "0.5417265", "0.5412508", "0.54115266", "0.54070294", "0.54031116", "0.53886133", "0.53788406", "0.53779006", "0.53677", "0.5367047" ]
0.7864236
0
Downloads a YouTube video by its unique id.
def youtube_download_by_id(id, title=None, output_dir='.', merge=True, info_only=False): raw_video_info = get_content('http://www.youtube.com/get_video_info?video_id=%s' % id) video_info = parse.parse_qs(raw_video_info) if video_info['status'] == ['ok'] and ('use_cipher_signature' not in video_info or video_info['use_cipher_signature'] == ['False']): title = parse.unquote_plus(video_info['title'][0]) stream_list = parse.parse_qs(raw_video_info)['url_encoded_fmt_stream_map'][0].split(',') else: # Parse video page when video_info is not usable. video_page = get_content('http://www.youtube.com/watch?v=%s' % id) ytplayer_config = json.loads(match1(video_page, r'ytplayer.config\s*=\s*([^\n]+);')) title = ytplayer_config['args']['title'] stream_list = ytplayer_config['args']['url_encoded_fmt_stream_map'].split(',') streams = { parse.parse_qs(stream)['itag'][0] : parse.parse_qs(stream) for stream in stream_list } for codec in yt_codecs: itag = str(codec['itag']) if itag in streams: download_stream = streams[itag] break url = download_stream['url'][0] if 'sig' in download_stream: sig = download_stream['sig'][0] else: sig = decrypt_signature(download_stream['s'][0]) url = '%s&signature=%s' % (url, sig) type, ext, size = url_info(url) print_info(site_info, title, type, size) if not info_only: download_urls([url], title, ext, size, output_dir, merge = merge)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download(idd, path):\n print(f'[{script}]: Downloading YT video \"{idd}\"...') if verbosity >= 1 else None\n\n try:\n yt = pytube.YouTube(\"https://www.youtube.com/watch?v=\" + idd)\n stream = yt.streams.filter(progressive=True).first()\n stream.download(path, filename=idd)\n except Exception:\n print(f'[{script}]: Failed download of YT video \"{idd}\".')\n return None\n\n data = {\n \"idd\": idd,\n \"abr\": stream.abr,\n \"acodec\": stream.audio_codec,\n \"bitrate\": stream.bitrate,\n \"codecs\": stream.codecs,\n \"fps\": stream.fps,\n \"mime\": stream.mime_type,\n \"res\": stream.resolution,\n \"vcodec\": stream.video_codec,\n \"size\": stream._filesize,\n \"frames\": stream.fps * yt.length,\n }\n\n file_path = path + \"/\" + data[\"idd\"] + \".mp4\"\n print(\n f'[{script}]: Download successful. Saved to \"{file_path}\".'\n ) if verbosity >= 2 else None\n return data", "def get_video(self, video_id):\n uri = 'videos/' + video_id\n return self.make_request(uri)", "def download_video(self, url):\n yt = YouTube(url)\n yt_filtered = yt.streams.filter(progressive=True, file_extension=\"mp4\")\n yt_resolutions = yt_filtered.order_by(\"resolution\")\n\n # Downloads the first video that fits the description\n video = yt_resolutions.desc().first()\n video.download()\n\n # Returns the filename\n return video.default_filename", "def get_youtube_video_url(video_id):\n url = \"https://www.youtube.com/watch?v=\" + video_id\n return url", "def download_from_youtube():\n linkinput = input(\"Enter the url you want to download: \")\n youtube_object = Youtube(linkinput)\n youtube_object.youtube()", "def get_video_by_id():\n sample_id = demisto.getArg('id')\n r = req('GET', SUB_API + 'samples/' + sample_id + '/video.webm')\n ec = {'ThreatGrid.Sample.Id': sample_id}\n demisto.results([\n {\n 'Type': entryTypes['note'],\n 'EntryContext': ec,\n 'HumanReadable': '### ThreatGrid Sample Run Video File -\\n'\n + 'Your sample run video file download request has been completed successfully for '\n + sample_id,\n 'Contents': ec,\n 'ContentsFormat': formats['json']\n },\n fileResult(sample_id + '.webm', r.content)\n ])", "def get_yt_video(yt_url):\n\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }],\n 'outtmpl': '%(id)s.%(ext)s'\n }\n ydl = youtube_dl.YoutubeDL(ydl_opts)\n\n with ydl:\n result = ydl.extract_info(\n #'http://www.youtube.com/watch?v=BaW_jenozKc',\n yt_url,\n download=True # We just want to extract the info\n )\n\n if 'entries' in result:\n # Can be a playlist or a list of videos\n video = result['entries'][0]\n else:\n # Just a video\n video = result\n\n return video", "def play_youtube(self, media_id):\n pass", "def movieid_first_video_url(self, movie_id):\n YOUTUBE_URL = \"https://www.youtube.com/watch?v=\"\n VIDEOS_URL = \"https://api.themoviedb.org/3/movie/%s/videos\"\n url_with_movieid = VIDEOS_URL % (movie_id)\n parm_dict = {\"api_key\": self.api_key, \"language\": self.language}\n url = url_with_movieid + \"?\" + urlencode(parm_dict, doseq=True)\n # print url\n\n response = requests.get(url)\n json_dict = json.loads(response.text)\n response.close()\n\n youtube_video_key = json_dict['results'][0]['key']\n return YOUTUBE_URL + youtube_video_key", "async def get_video(videoid):\n\theaders = {\n\t\t'Client-ID': config['twitch_clientid'],\n\t\t'Authorization': f\"Bearer {get_token()}\",\n\t}\n\tdata = await common.http.request_coro(\"https://api.twitch.tv/helix/videos\", {\"id\": 
videoid.lstrip('v')}, headers=headers)\n\treturn json.loads(data)[\"data\"][0]", "def url(yt_id: str) -> str:\n return \"https://www.youtube.com/watch?v={}\".format(yt_id)", "def youtube_download(url, output_dir='.', merge=True, info_only=False):\n \n id = match1(url, r'youtu.be/([^/]+)') or parse_query_param(url, 'v')\n assert id\n \n youtube_download_by_id(id, title=None, output_dir=output_dir, merge=merge, info_only=info_only)", "def get_video(lesson_id, video_id):\n url = '{0}?cat={1}&video={2}'.format(BASE_URL, lesson_id, video_id)\n page = requests.get(url, verify=False)\n soup = BeautifulSoup(page.content)\n return soup.find('iframe')['src'].split('/')[-1]", "def play_youtube(self, media_id):\n raise NotImplementedError()", "def fetch_youtube_url(search_term, dev_key=None):\r\n in_cache, video_id = check_if_in_cache(search_term)\r\n if in_cache:\r\n return YOUTUBE_VIDEO_URL + video_id\r\n if not dev_key:\r\n YOUTUBE_SEARCH_BASE = \"https://www.youtube.com/results?search_query=\"\r\n try:\r\n response = requests.get(YOUTUBE_SEARCH_BASE + search_term).content\r\n html_response = html.fromstring(response)\r\n video = html_response.xpath(\"//a[contains(@class, 'yt-uix-tile-link')]/@href\")\r\n video_id = re.search(\"((\\?v=)[a-zA-Z0-9_-]{4,15})\", video[0]).group(0)[3:]\r\n log.debug(f\"Found video id {video_id} for search term {search_term}\")\r\n _ = save_to_cache(search_term=search_term, video_id=video_id)\r\n return YOUTUBE_VIDEO_URL + video_id\r\n except AttributeError as e:\r\n log.warning(f\"Could not find scrape details for {search_term}\")\r\n capture_exception(e)\r\n return None\r\n except IndexError as e:\r\n log.warning(f\"Could not perform scrape search for {search_term}, got a different HTML\")\r\n capture_exception(e)\r\n return None\r\n else:\r\n youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,\r\n developerKey=dev_key,\r\n cache_discovery=False)\r\n try:\r\n in_cache, video_id = check_if_in_cache(search_term)\r\n\r\n if not in_cache:\r\n search_response = youtube.search().list(q=search_term,\r\n part='id, snippet').execute()\r\n for v in search_response['items']:\r\n if v['id']['kind'] == VIDEO:\r\n video_id = v['id']['videoId']\r\n log.debug(f\"Adding Video id {video_id}\")\r\n _ = save_to_cache(search_term=search_term, video_id=video_id) \r\n return YOUTUBE_VIDEO_URL + video_id\r\n except HttpError as err:\r\n err_details = loads(err.content.decode('utf-8')).get('error').get('errors')\r\n secho(\"Couldn't complete search due to following errors: \", fg='red')\r\n for e in err_details:\r\n error_reason = e.get('reason')\r\n error_domain = e.get('domain')\r\n error_message = e.get('message')\r\n\r\n if error_reason == 'quotaExceeded' or error_reason == 'dailyLimitExceeded':\r\n secho(f\"\\tYou're over daily allowed quota. 
Unfortunately, YouTube restricts API keys to a max of 10,000 requests per day which translates to a maximum of 100 searches.\", fg='red')\r\n secho(f\"\\tThe quota will be reset at midnight Pacific Time (PT).\" ,fg='red')\r\n secho(f\"\\tYou can request for Quota increase from https://console.developers.google.com/apis/api/youtube.googleapis.com/quotas.\", fg='red')\r\n else:\r\n secho(f\"\\t Search failed due to {error_domain}:{error_reason}, message: {error_message}\")\r\n return None", "def test_task_video_download(url_to_video: str, empty_video_resource: VideoResource):\n download_video(url_to_video, empty_video_resource.id)\n empty_video_resource.refresh_from_db()\n video_instance = empty_video_resource.videos.filter(primary=True).first()\n\n assert empty_video_resource.videos.all()\n assert video_instance.extension == 'mp4'\n assert video_instance.primary\n for item in video_instance.video.open():\n assert item", "async def youtube(self, ctx, *args):\n if not args:\n await ctx.send(\"usage: `>youtube [search string]`\")\n return\n search_string = \" \".join(args)\n search_string = urllib.parse.urlencode({'search_query': search_string})\n response = requests.get('http://www.youtube.com/results?' + search_string + \"&hl=en_US&app=desktop\")\n if response.status_code == 200:\n search_results = re.findall('href=\\\\\"\\\\/watch\\\\?v=(.{11})', response.content.decode())\n try:\n first_result_url = 'http://www.youtube.com/watch?v=' + search_results[0]\n except IndexError:\n with open('downloads/yt_dump.txt', 'w') as f:\n f.write(response.content.decode())\n #print(response.is_redirect)\n return await ctx.send(\"Found nothing!\")\n await ctx.send(first_result_url)\n self.logger.info(misolog.format_log(ctx, f\"{first_result_url}\"))\n else:\n await ctx.send(\"Error: status code \" + str(response.status_code))\n self.logger.info(misolog.format_log(ctx, f\"error{response.status_code}\"))", "async def get_youtube_video(self, ctx, *, query):\n\n if not query:\n return await ctx.send(\"Go on, search something.\")\n\n # Executor for sync function\n video_list = await self.bot.loop.run_in_executor(None, YouTube.sync_get_youtube_video, query)\n\n if not video_list:\n return await ctx.say(f\"Sorry, couldn't find anything for `{query}`\")\n\n # Return top hit\n await ctx.send(f'{video_list[0][\"video_url\"]}')", "async def download_video(v_url):\n reply = await v_url.get_reply_message()\n if v_url.pattern_match.group(2) != \"\":\n url = v_url.pattern_match.group(2)\n elif reply is not None:\n url = reply.message\n url = re.findall(r\"\\bhttps?://.*\\.\\S+\", reply.message)[0]\n else:\n return\n type = (\n v_url.pattern_match.group(1).lower()\n if v_url.pattern_match.group(1) is not None\n else \"a\"\n )\n await v_url.edit(\"`Preparing to download...`\")\n out_folder = Config.TMP_DOWNLOAD_DIRECTORY + \"youtubedl/\"\n Config.TMP_DOWNLOAD_DIRECTORY + \"/thumb_image.jpg\"\n if not os.path.isdir(out_folder):\n os.makedirs(out_folder)\n if type == \"a\":\n opts = {\n \"format\": \"bestaudio\",\n \"addmetadata\": True,\n \"noplaylist\": False,\n \"key\": \"FFmpegMetadata\",\n \"writethumbnail\": True,\n \"embedthumbnail\": True,\n \"prefer_ffmpeg\": True,\n \"geo_bypass\": True,\n \"nocheckcertificate\": True,\n \"postprocessors\": [\n {\n \"key\": \"FFmpegExtractAudio\",\n \"preferredcodec\": \"mp3\",\n \"preferredquality\": \"320\",\n }\n ],\n \"outtmpl\": out_folder + \"%(title)s.%(ext)s\",\n \"quiet\": True,\n \"logtostderr\": False,\n }\n video = False\n song = True\n\n elif type == \"v\":\n opts = {\n 
\"format\": \"best\",\n \"addmetadata\": True,\n \"noplaylist\": False,\n \"getthumbnail\": True,\n \"embedthumbnail\": True,\n \"xattrs\": True,\n \"writethumbnail\": True,\n \"key\": \"FFmpegMetadata\",\n \"prefer_ffmpeg\": True,\n \"geo_bypass\": True,\n \"nocheckcertificate\": True,\n \"postprocessors\": [\n {\"key\": \"FFmpegVideoConvertor\", \"preferedformat\": \"mp4\"},\n ],\n \"outtmpl\": out_folder + \"%(title)s.%(ext)s\",\n \"logtostderr\": False,\n \"quiet\": True,\n }\n song = False\n video = True\n\n try:\n await v_url.edit(\"`Fetching playlist data, please wait..`\")\n with YoutubeDL(opts) as ytdl:\n ytdl.extract_info(url)\n # print(ytdl_data['thumbnail'])\n filename = sorted(get_lst_of_files(out_folder, []))\n except DownloadError as DE:\n await v_url.edit(f\"`{str(DE)}`\")\n return\n except ContentTooShortError:\n await v_url.edit(\"`The download content was too short.`\")\n return\n except GeoRestrictedError:\n await v_url.edit(\n \"`Video is not available from your geographic location due to geographic restrictions imposed by a website.`\"\n )\n return\n except MaxDownloadsReached:\n await v_url.edit(\"`Max-downloads limit has been reached.`\")\n return\n except PostProcessingError:\n await v_url.edit(\"`There was an error during post processing.`\")\n return\n except UnavailableVideoError:\n await v_url.edit(\"`Media is not available in the requested format.`\")\n return\n except XAttrMetadataError as XAME:\n await v_url.edit(f\"`{XAME.code}: {XAME.msg}\\n{XAME.reason}`\")\n return\n except ExtractorError:\n await v_url.edit(\"`There was an error during info extraction.`\")\n return\n except Exception as e:\n await v_url.edit(f\"{str(type(e)): {str(e)}}\")\n return\n c_time = time.time()\n await v_url.edit(\"`YouTube Playlist Downloading Processing Now.\\nPlease Wait!`\")\n if song:\n for single_file in filename:\n if os.path.exists(single_file):\n caption_rts = os.path.basename(single_file)\n force_document = True\n supports_streaming = False\n document_attributes = []\n if single_file.endswith((\".mp4\", \".mp3\", \".flac\", \".webm\")):\n metadata = extractMetadata(createParser(single_file))\n duration = 0\n width = 0\n height = 180\n if metadata.has(\"duration\"):\n duration = metadata.get(\"duration\").seconds\n document_attributes = [\n DocumentAttributeVideo(\n duration=duration,\n w=width,\n h=height,\n round_message=False,\n supports_streaming=True,\n )\n ]\n try:\n ytdl_data_name_audio = os.path.basename(single_file)\n thumb = (\n out_folder\n + ytdl_data_name_audio[: (len(ytdl_data_name_audio) - 4)]\n + \".jpg\"\n )\n print(ytdl_data_name_audio)\n file_path = single_file\n song_size = file_size(file_path)\n await v_url.client.send_file(\n v_url.chat_id,\n single_file,\n caption=f\"`{ytdl_data_name_audio}`\"\n + \"\\n\"\n + f\"Size👉 {song_size}\",\n force_document=force_document,\n supports_streaming=supports_streaming,\n allow_cache=False,\n thumb=thumb,\n reply_to=v_url.message.id,\n attributes=document_attributes,\n progress_callback=lambda d, t: asyncio.get_event_loop().create_task(\n progress(\n d,\n t,\n v_url,\n c_time,\n \"Uploading..\",\n f\"{ytdl_data_name_audio}\",\n )\n ),\n )\n # os.remove(thumb)\n except Exception as e:\n await v_url.client.send_message(\n v_url.chat_id,\n \"{} caused `{}`\".format(caption_rts, str(e)),\n )\n continue\n os.remove(single_file)\n await asyncio.sleep(DELETE_TIMEOUT)\n # await v_url.delete()\n shutil.rmtree(out_folder)\n if video:\n for single_file in filename:\n if os.path.exists(single_file):\n caption_rts = 
os.path.basename(single_file)\n force_document = False\n supports_streaming = True\n document_attributes = []\n if single_file.endswith((\".mp4\", \".mp3\", \".flac\", \".webm\")):\n metadata = extractMetadata(createParser(single_file))\n duration = 0\n width = 0\n height = 0\n if metadata.has(\"duration\"):\n duration = metadata.get(\"duration\").seconds\n document_attributes = [\n DocumentAttributeVideo(\n duration=duration,\n w=width,\n h=height,\n round_message=False,\n supports_streaming=True,\n )\n ]\n # print(ytdl_data)\n # for file in os.listdir(\"./DOWNLOADS/youtubedl/\"):\n # if file.endswith(\".jpg\"):\n # thumb = \"./DOWNLOADS/youtubedl/\" + file\n # print(os.path.join(\"./DOWNLOADS/youtubedl/\", file))\n # image_link = ytdl_data['thumbnail']\n # downloaded_image = wget.download(image_link,out_folder)\n # thumb = ytdl_data_name_video + \".jpg\"\n file_path = single_file\n video_size = file_size(file_path)\n try:\n ytdl_data_name_video = os.path.basename(single_file)\n thumb = (\n out_folder\n + ytdl_data_name_video[: (len(ytdl_data_name_video) - 4)]\n + \".jpg\"\n )\n await v_url.client.send_file(\n v_url.chat_id,\n single_file,\n caption=f\"`{ytdl_data_name_video}`\"\n + \"\\n\"\n + f\"Size👉 {video_size}\",\n force_document=force_document,\n supports_streaming=supports_streaming,\n thumb=thumb,\n allow_cache=False,\n reply_to=v_url.message.id,\n attributes=document_attributes,\n progress_callback=lambda d, t: asyncio.get_event_loop().create_task(\n progress(\n d,\n t,\n v_url,\n c_time,\n \"Uploading..\",\n f\"{ytdl_data_name_video}\",\n )\n ),\n )\n # os.remove(thumb)\n except Exception as e:\n await v_url.client.send_message(\n v_url.chat_id,\n \"{} caused `{}`\".format(caption_rts, str(e)),\n )\n continue\n os.remove(single_file)\n await asyncio.sleep(DELETE_TIMEOUT)\n # await v_url.delete()\n shutil.rmtree(out_folder)", "def get_video(self, video_id):\n return self._videos.get(video_id, None)", "async def retrieveJS(self, videoId=None):\n session = await self.getSession\n resp = await session.get(\"https://www.youtube.com/iframe_api\")\n # if resp.status == 200 and", "def download_video(self, file_path, video_url, video_creation_time):\r\n logger.debug(\"Downloading video created at \" + _format_timestamp_iso(self.tz, video_creation_time) + \" from \"\r\n + video_url + \" to \" + file_path)\r\n failed = False\r\n try:\r\n self._download_with_api(file_path, video_url)\r\n except Exception as e:\r\n logger.debug(\"Video download failed using TikTokApi: \" + str(e))\r\n failed = True\r\n if not os.path.isfile(file_path):\r\n failed = True\r\n logger.debug(\"No file was created by TikTokApi at \" + file_path)\r\n elif os.stat(file_path).st_size < 1024:\r\n failed = True\r\n try:\r\n os.remove(file_path)\r\n logger.debug(\"Deleted malformed TikTokApi download at \" + file_path)\r\n except Exception as ee:\r\n logger.error(\"Unable to delete malformed TikTokApi download at \" + str(ee))\r\n if failed:\r\n sleep_time = random.uniform(self.sleep_min, self.sleep_max)\r\n logger.info(\"Sleeping for: \" + str(sleep_time) + \" seconds\")\r\n sleep(sleep_time)\r\n try:\r\n logger.debug(\"Falling back to YouTube-dl\")\r\n self.fallback_counter += 1\r\n self._download_with_ytdl(file_path, video_url)\r\n if not os.path.isfile(file_path):\r\n raise AssertionError(\"No file was created by YouTube-dl at \" + file_path)\r\n elif os.stat(file_path).st_size < 1024:\r\n try:\r\n os.remove(file_path)\r\n logger.debug(\"Deleted malformed YouTube-dl download at \" + file_path)\r\n except Exception 
as ee:\r\n raise AssertionError(\"Malformed file was created at \" + file_path +\r\n \" and could not be removed: \" + str(ee))\r\n raise AssertionError(\"Malformed file was created at \" + file_path + \" and was removed\")\r\n failed = False\r\n except youtube_dl.utils.DownloadError as ee:\r\n logger.error(\"YouTube-dl DownloadError: \" + str(ee))\r\n self.ytdl_downloaderror_counter += 1\r\n failed = True\r\n except Exception as ee:\r\n logger.error(\"Video download failed with YouTube-dl: \" + str(ee))\r\n self.other_error_counter += 1\r\n failed = True\r\n if not failed:\r\n try:\r\n os.utime(file_path, (video_creation_time, video_creation_time))\r\n except Exception as e:\r\n logger.debug(\"Unable to set utime of \" + str(video_creation_time) + \" on file \" + file_path +\r\n \", Error: \" + str(e))\r\n return True\r\n return False", "def do_downloads(filename1=\"og\", filename2=\"lyrical\", video_id=DEFALT_VIDEO_ID):\n original_video_url = youtube_id_to_url(video_id)\n download_from_url(original_video_url, filename1)\n lyrics_video_url = get_lyrics_url(original_video_url)\n download_from_url(lyrics_video_url, filename2)\n\n return filename1, filename2", "def download(video_identifier,\n output_filename,\n num_attempts=5,\n url_base='https://www.youtube.com/watch?v='):\n # Defensive argument checking.\n assert isinstance(video_identifier, str), 'video_identifier must be string'\n assert isinstance(output_filename, str), 'output_filename must be string'\n assert len(video_identifier) == 11, 'video_identifier must have length 11'\n\n status = False\n\n if not os.path.exists(output_filename):\n command = [\n 'youtube-dl', '--quiet', '--no-warnings', '--no-check-certificate',\n '-f', 'mp4', '-o',\n '\"%s\"' % output_filename,\n '\"%s\"' % (url_base + video_identifier)\n ]\n command = ' '.join(command)\n print(command)\n attempts = 0\n while True:\n try:\n subprocess.check_output(\n command, shell=True, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError:\n attempts += 1\n if attempts == num_attempts:\n return status, 'Fail'\n else:\n break\n # Check if the video was successfully saved.\n status = os.path.exists(output_filename)\n return status, 'Downloaded'", "def download_wrapper(youtube_id, output_dir):\n # we do this to align with names in annotations\n output_filename = os.path.join(output_dir, youtube_id + '.mp4')\n if os.path.exists(output_filename):\n status = tuple([youtube_id, True, 'Exists'])\n return status\n\n downloaded, log = download(youtube_id, output_filename)\n status = tuple([youtube_id, downloaded, log])\n return status", "def google_youtube_details(vidid):\n\tif not API_KEY:\n\t\traise ConfigException(\"Require API_KEY for googleapi. 
Reload after setting.\")\n\t# TODO: make module option for safesearch\n\td = {\"id\" : quote(vidid.encode(\"utf-8\")), \"part\" : \"contentDetails,id,snippet,statistics,status\", \"key\" : API_KEY}\n\t\n\tf = urlopen(YOUTUBE_INFO_URL % (urlencode(d)))\n\tytdata = load(f)\n\tif f.getcode() == 200:\n\t\tif \"items\" in ytdata:\n\t\t\tresults = ytdata[\"items\"]\n\t\t\tif len(results) == 0:\n\t\t\t\treturn None\n\t\t\treturn results[0]\n\telse:\n\t\traise RuntimeError(\"Error (%s): %s\" % (f.getcode(), ytdata.replace(\"\\n\", \" \")))", "def download_videos(download_limit=6):\n videos = []\n for fname in os.listdir('yt_api_data'):\n videos += load_video_data(fname)\n vids_downloaded = 0\n excluded_vids = get_excluded_videos()\n for video_id, title in videos:\n if download_limit != 'all' and vids_downloaded == download_limit:\n break\n title = title.replace(' ', '_')\n mkv_path = \"videos/\" + title + \".mkv\"\n mp4_path = \"videos/\" + title + \".mp4\"\n download_fpath = \"videos/\" + title\n if not check_excluded_list(excluded_vids, title) and not os.path.isfile(mkv_path) and not os.path.isfile(mp4_path):\n print(colored(str(vids_downloaded + 1) + \": \", \"yellow\") + colored(video_id + \" downloading: \" + download_fpath, \"green\"))\n command_prefix = \"youtube-dl -o \" + download_fpath\n if video_id[0] == '-': \n os.system(command_prefix + \" -- \" + video_id)\n else:\n os.system(command_prefix + \" \" + video_id)\n vids_downloaded += 1\n else:\n print(colored(\"skipping download: \" + title + \"with youtube_id: \" + video_id, \"yellow\"))", "def download_vid(vid_link, quality_num=None):\r\n if quality_num is not None:\r\n # if quality_num provided\r\n try:\r\n os.system(\"youtube-dl -f \"+str(quality_num)+\" \\'\"+str(vid_link)+\"\\'\")\r\n except Exception:\r\n print(Exception)\r\n else:\r\n # by default the best quality is downloaded\r\n try:\r\n os.system(\"youtube-dl \"+str(vid_link))\r\n except Exception:\r\n print(Exception)", "def youtube_id_to_url(yt_video_id):\n return 'https://www.youtube.com/watch?v=' + yt_video_id", "def download(target_url):\n program_location = sys.executable\n program_name = \"youtube-dl.exe\"\n # Define arguments. 
see this url for help\n # https://github.com/rg3/youtube-dl\n ignore_errors = \"-i\"\n safe_filenames = \"--restrict-filenames\"\n output_arg = \"-o\"\n output_template = \"download\\%(uploader)s\\%(playlist)s\\%(title)s-%(id)s.%(ext)s\"\n command = [program_name, ignore_errors, safe_filenames, output_arg, output_template, target_url]\n result = subprocess.call(command)\n print \"Command result: \", result", "def download(self, url=None):\n if url is None:\n if self.results is None:\n raise ValueError(\"Please specify a valid url.\")\n else:\n url = self.results[0]\n try:\n meta = pafy.new(url)\n except Exception:\n raise IOError(\"Video not available for download.\")\n\n vid = meta.getbest()\n path = vid.download()\n self.videos.append(path)\n return path", "def make_video_url(movie_id, api_key):\n\n MOVIE_URL = \"https://api.themoviedb.org/3/movie/\"\n LANG = \"&language=en-US\"\n # Find the youtube key for video trailer\n connection = requests.get(MOVIE_URL + str(movie_id) +\n \"/videos?api_key=\" + api_key + LANG)\n videos_json = json.loads(connection.text)\n connection.close()\n\n if connection.status_code != 200:\n # constant in case issue is found with connection....\n return VIDEO_URL + '5PSNL1qE6VY'\n else:\n if len(videos_json['results']) == 0:\n # constant in case no video is found for given movie....\n return VIDEO_URL + '5PSNL1qE6VY'\n else:\n # If all well we get aa video url for all movie\n # based on discovery or discovery by year\n return VIDEO_URL + videos_json['results'][0]['key']", "def download(video, save_dir, vid):\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n print(\"--> downloading {}\".format(video.title))\n\n best = video.getbest(preftype=\"mp4\")\n filename = best.download(\n filepath=os.path.join(save_dir,\n \"{}.{}\".format(vid, best.extension)))\n print(\"--> saved to {}\".format(filename))\n\n return os.path.join(save_dir, \"{}.{}\".format(vid, best.extension))", "def download_vid(item):\n vid_name, vid_id = item\n vid = Video(vid_name, vid_id, resolution='224p')\n vid.download()", "async def youtube(self, ctx, *, query):\n url = f\"https://www.googleapis.com/youtube/v3/search?part=snippet&q={query}&type=video&maxResults=1&key={google_api_key}\"\n response = requests.get(url)\n try:\n await ctx.send(\n f\"https://www.youtube.com/watch?v={response.json()['items'][0]['id']['videoId']}\"\n )\n except IndexError:\n await ctx.send(\"**No results for given query found.**\")", "def get_yt_video_id(url):\n\n from urlparse import urlparse, parse_qs\n\n if url.startswith(('youtu', 'www')):\n url = 'http://' + url\n\n query = urlparse(url)\n\n if 'youtube' in query.hostname:\n if query.path == '/watch':\n return parse_qs(query.query)['v'][0]\n elif query.path.startswith(('/embed/', '/v/')):\n return query.path.split('/')[2]\n elif 'youtu.be' in query.hostname:\n return query.path[1:]\n else:\n raise ValueError", "async def download_video(event):\n url = event.pattern_match.group(1)\n rmsg = await event.get_reply_message()\n if not url and rmsg:\n myString = rmsg.text\n url = re.search(\"(?P<url>https?://[^\\s]+)\", myString).group(\"url\")\n if not url:\n return await edit_or_reply(event, \"What I am Supposed to find? 
Give link\")\n codevent = await edit_or_reply(event, \"`Preparing to download...`\")\n reply_to_id = await reply_id(event)\n ytdl_data = await ytdl_down(codevent, video_opts, url)\n if ytdl_down is None:\n return\n f = pathlib.Path(f\"{ytdl_data['title']}.mp4\".replace(\"|\", \"_\"))\n codthumb = pathlib.Path(f\"{ytdl_data['title']}.jpg\".replace(\"|\", \"_\"))\n if not os.path.exists(codthumb):\n codthumb = pathlib.Path(f\"{ytdl_data['title']}.webp\".replace(\"|\", \"_\"))\n if not os.path.exists(codthumb):\n codthumb = None\n await codevent.edit(\n f\"`Preparing to upload video:`\\\n \\n**{ytdl_data['title']}**\\\n \\nby *{ytdl_data['uploader']}*\"\n )\n ul = io.open(f, \"rb\")\n c_time = time.time()\n attributes, mime_type = await fix_attributes(f, ytdl_data, supports_streaming=True)\n uploaded = await event.client.fast_upload_file(\n file=ul,\n progress_callback=lambda d, t: asyncio.get_event_loop().create_task(\n progress(d, t, codevent, c_time, \"upload\", file_name=f)\n ),\n )\n ul.close()\n media = types.InputMediaUploadedDocument(\n file=uploaded,\n mime_type=mime_type,\n attributes=attributes,\n thumb=await event.client.upload_file(codthumb) if codthumb else None,\n )\n await event.client.send_file(\n event.chat_id,\n file=media,\n reply_to=reply_to_id,\n caption=ytdl_data[\"title\"],\n )\n os.remove(f)\n if codthumb:\n os.remove(codthumb)\n await event.delete()", "def test_get_video_id_from_url(self):\n self.assertEqual(get_video_id_from_url('https://www.youtube.com/embed/DqGwxR_0d1M'),\n 'DqGwxR_0d1M')\n self.assertEqual(get_video_id_from_url('https://youtu.be/DqGwxR_0d1M'), 'DqGwxR_0d1M')\n self.assertEqual(\n get_video_id_from_url('https://www.youtube.com/watch?v=DqGwxR_0d1M&feature=youtu.be'),\n 'DqGwxR_0d1M')\n self.assertEqual(get_video_id_from_url('https://www.youtube.com/watch?v=DqGwxR_0d1M'),\n 'DqGwxR_0d1M')", "def __init__(self, url, params=None):\n super(YoutubeVideo, self).__init__(url, params)\n self.video_id = self.get_video_id()", "def youtube_cmd(ctx, cmd, arg):\n\t\turl = \"http://gdata.youtube.com/feeds/api/videos?q=%s&max-results=3&v=2\" % urllib.parse.quote(arg)\n\t\tr = urllib.request.urlopen(url)\n\t\tr = dom.parse(r)\n\t\t\t\n\t\tresults = int(r.getElementsByTagName(\"openSearch:totalResults\")[0].firstChild.data)\n\n\t\tif results > 0:\n\t\t\tres = min(results, 3)\n\t\t\tctx.reply(\"Results 1-%d out of %s\" % (res, prettyNumber(results)), \"YouTube\")\n\t\telse:\n\t\t\tctx.reply(\"No results found for %s\" % arg, \"YouTube\")\n\n\t\tfor i in r.getElementsByTagName(\"entry\"):\n\t\t\tvid = i.getElementsByTagName(\"id\")[0].firstChild.data\n\t\t\tvid = vid.split(\":\")[-1]\n\n\t\t\tdisplayMeta(ctx, i, vid)", "def query_youtube(movie_title):\n #convert movie_title to “percent-encoded” string, then open search\n query_string = urllib.urlencode({\"search_query\" : movie_title + \" trailer\"})\n html_content = urllib.urlopen(\"http://www.youtube.com/results?\" +\n query_string)\n #use regular expressions to find all 11 character videos IDs\n query_results = re.findall(r'href=\\\"\\/watch\\?v=(.{11})',\n html_content.read())\n return \"http://www.youtube.com/watch?v=\" + query_results[0]", "def download_audio_from_youtube(youtube_link: str):\r\n with st.spinner(\"Extracting audio from Youtube...\"):\r\n try:\r\n a = pytube.YouTube(youtube_link).streams.first().download('files/','video_for_audio') # Download video from youtube\r\n b = ffmpg.ffmpeg_extract_audio('files/video_for_audio.mp4','files/audio.mp3') # extract sound and save as mp3\r\n 
os.remove('files/video_for_audio.mp4') # remove unecessary video\r\n # Release the process from the downloaded files\r\n del a, b\r\n st.success(\"Sound was extracted successfully from the youtube video!\")\r\n except:\r\n st.error(\"Unexpected error has occured, please try again!\")", "def youtube_dl_latest(args=None):\n args = parse_youtube_dl_arguments(args=args)\n download_videos(channels_file=args.channels_file, hierarchy=args.hierarchy)", "def _get_trailer_url(self, movie_id):\n payload = {\n 'api_key': self.api_key,\n 'language': self.language\n }\n\n youtube_base_url = 'https://www.youtube.com/watch?v='\n url = self.BASE_URL + str(movie_id) + '/videos'\n resp = requests.get(url, params=payload)\n\n if resp.status_code == requests.codes.ok:\n video_results = resp.json()['results']\n for video in video_results:\n if video['type'] == u'Trailer':\n return youtube_base_url + video['key']\n\n print 'Couldnt find a trailer for the movie, returning placeholder'\n print 'video trailer.'\n return 'https://www.youtube.com/watch?v=FnCdOQsX5kc'\n else:\n # Throws an error if the movie_id is not illegible\n resp.raise_for_status()", "def get_yt_link_by_id(video_id):\n info = InnerTube().player(video_id)\n det = info.get(\"videoDetails\", None)\n title = det.get(\"title\", None) if det else None\n streaming_data = info.get(\"streamingData\", None)\n fmts = streaming_data.get(\"formats\", None) if streaming_data else None\n\n if fmts:\n links = {Quality[i[\"itag\"]]: i[\"url\"] for i in fmts if i.get(\"itag\", -1) in Quality and \"url\" in i}\n\n if links and title:\n return links, title.replace(\"+\", \" \")\n\n cause = None\n status = info.get(\"playabilityStatus\", None)\n if status:\n cause = f\"[{status.get('status', '')}] {status.get('reason', '')}\"\n\n log(f\"{__class__.__name__}: Getting link to video with id '{video_id}' filed! 
Cause: {cause}\")\n\n return None, cause", "async def download(self, ctx, *, song):\n try:\n with youtube_dl.YoutubeDL(ytdl_download_format_options) as ydl:\n if \"https://www.youtube.com/\" in song:\n download = ydl.extract_info(song, True)\n else:\n infosearched = ydl.extract_info(\n \"ytsearch:\"+song, False)\n download = ydl.extract_info(\n infosearched['entries'][0]['webpage_url'], True)\n filename = ydl.prepare_filename(download)\n embed = discord.Embed(\n title=\"Your download is ready\", description=\"Please wait a moment while the file is beeing uploaded\")\n await ctx.send(embed=embed, delete_after=30)\n await ctx.send(file=discord.File(filename))\n os.remove(filename)\n except (youtube_dl.utils.ExtractorError, youtube_dl.utils.DownloadError):\n embed = discord.Embed(title=\"Song couldn't be downloaded\", description=(\"Song:\"+song))\n await ctx.send(embed=embed)", "async def youtube(self, ctx, *, query):\r\n\r\n utub = 'https://youtube.com/results?search_query='\r\n url = utub + query.replace(\" \", \"+\")\r\n r = requests.get(url).text\r\n num1 = r.find('{\"videoRenderer')\r\n num2 = r.find('{\"videoRenderer', num1+1)\r\n # print (num1)\r\n # print (num2)\r\n videoRenderer = (json.loads(r[num1:num2-1])[\"videoRenderer\"])\r\n vid = (videoRenderer[\"videoId\"])\r\n page = (\"https://youtube.com/watch?v=\" + vid)\r\n await ctx.send(page)", "def get_video_data(id, fetch_all_videos=True):\n youtube_data = _youtube_feed('videos', id)['entry']\n return Playlist(None, 1, [_get_video_data(youtube_data)], None)", "def compose_embed_youtube(video_id = None):\n assert(video_id != None)\n return \"http://www.youtube.com/embed/{0}?enablejsapi=1&wmode=opaque\".format(\n video_id\n )", "def download_video_data(self):\n\n def scrape_url(url):\n \"\"\"Scrape the video list, youtube_dl does all the heavy lifting\"\"\"\n ydl_opts = {\n \"ignoreerrors\": True, # Skip private and unavaliable videos\n }\n\n ydl = youtube_dl.YoutubeDL(ydl_opts)\n\n with ydl:\n result_ydl = ydl.extract_info(\n url,\n download=False # No download needed, only the info\n )\n\n logger.debug('Url scraped {}', url)\n if 'entries' in result_ydl:\n # It's a playlist or a list of videos\n return result_ydl['entries']\n # Just a video\n return [result_ydl]\n\n youtube_list = sum((scrape_url(url) for url in self.youtube_lists), [])\n for youtube_video_data in youtube_list:\n if youtube_video_data: # Valid video\n self.youtube_videos.append(\n Video.from_youtube(\n video_data=youtube_video_data, event=self))\n else:\n logger.warning('Null youtube video')", "def downloadvideo(filename):\n url = \"http://openings.moe/video/\" + filename\n f = getfile(url)\n safeprint(Colors.PURPLE + url + Colors.END + \":\\nSaving to --> \" + Colors.YELLOW + filename + Colors.END)\n with open(os.path.basename(url), \"wb\") as local_file:\n try:\n local_file.write(f.read())\n except IOError as e:\n safeprint(\"An error occurred while saving the file, try again. 
\" + str(e))", "def tekstowo_youtube_url(source):\n reg = re.compile(r\"var videoID = \\\"(.*)\\\";\")\n try:\n video_id = reg.search(source).group(1)\n except Exception:\n raise Exception(ERROR_STR + '[crawler] cannot find videoID')\n if not video_id:\n raise Exception(ERROR_STR + '[crawler] empty videoID')\n\n return \"https://www.youtube.com/watch?v=\" + video_id", "def youtubefetch(url,outputp=''):\n\toutputpath = os.path.expanduser(outputp)\n\tif (os.path.exists(outputpath) & os.path.isdir(outputpath)) != True:\n\t\toutputpath = '/tmp/'\n\t\n\t(_,_,urlproper) = url.partition(\"?\")\n\t(urlproper,_,_) = urlproper.partition(\"&\")\n\turlproper = \"http://proxy.cs.tcd.ie:8080/www.youtube.com/watch?\" + urlproper\n\tpage = urllib2.urlopen(url).readlines()\n\tfilteredpage = [ elem for elem in page if elem.find(\"fullscreenUrl\") != -1 ]\n\tif (len(filteredpage) == 0):\n\t\treturn 'failed'\n\t\t\n\tfilteredpage = filteredpage[0]\n\t(_, p1, partialurl) = filteredpage.partition(\"video_id=\")\n\t(partialurl , _, name) = partialurl.rpartition(\"&title=\")\n\t(name,_,_) = name.partition(\"'\")\n\tvideourl = \"http://www.youtube.com/get_video.php?\" + p1 + partialurl\n\tvideo = urllib2.urlopen(videourl).read()\n\t#print videourl\n\t#print name\n\toutputfile = open((outputpath+name+\".flv\"),'wb')\n\toutputfile.write(video)\n\toutputfile.flush()\n\toutputfile.close()\n\treturn outputpath+name+\".flv\"", "async def video(ctx, message):\n \"\"\":param: ctx\"\"\"\n \"\"\":param: message\"\"\"\n \"\"\"return video url\"\"\"\n link_list = []\n print ('Searching YouTube for: %s' % message)\n url = \"https://www.youtube.com/results?search_query=\" + message\n response = urlopen(url)\n html = response.read()\n soup = BeautifulSoup(html, \"lxml\")\n for vid in soup.findAll(attrs={'class': 'yt-uix-tile-link'}):\n link_list.append('https://www.youtube.com' + vid['href'])\n if(len(link_list) >=1):\n random_num = random.randint(0, len(link_list) - 1)\n await bot.say(link_list[random_num])\n else:\n await bot.say(\"there is no contente for \"+message)", "def download(dltype, num):\n # This function needs refactoring!\n # pylint: disable=R0912\n # pylint: disable=R0914\n if g.browse_mode == \"ytpl\" and dltype in (\"da\", \"dv\"):\n plid = g.ytpls[int(num) - 1][\"link\"]\n down_plist(dltype, plid)\n return\n\n elif g.browse_mode == \"ytpl\":\n g.message = \"Use da or dv to specify audio / video playlist download\"\n g.message = c.y + g.message + c.w\n g.content = generate_songlist_display()\n return\n\n elif g.browse_mode != \"normal\":\n g.message = \"Download must refer to a specific video item\"\n g.message = c.y + g.message + c.w\n g.content = generate_songlist_display()\n return\n\n writestatus(\"Fetching video info...\")\n song = (g.model.songs[int(num) - 1])\n best = dltype.startswith(\"dv\") or dltype.startswith(\"da\")\n\n if not best:\n\n try:\n # user prompt for download stream\n url, ext, url_au, ext_au = prompt_dl(song)\n\n except KeyboardInterrupt:\n g.message = c.r + \"Download aborted!\" + c.w\n g.content = generate_songlist_display()\n return\n\n if not url or ext_au == \"abort\":\n # abort on invalid stream selection\n g.content = generate_songlist_display()\n g.message = \"%sNo download selected / invalid input%s\" % (c.y, c.w)\n return\n\n else:\n # download user selected stream(s)\n filename = _make_fname(song, ext)\n args = (song, filename, url)\n\n if url_au and ext_au:\n # downloading video and audio stream for muxing\n audio = False\n filename_au = _make_fname(song, ext_au)\n args_au = 
(song, filename_au, url_au)\n\n else:\n audio = ext in (\"m4a\", \"ogg\")\n\n kwargs = dict(audio=audio)\n\n elif best:\n # set updownload without prompt\n url_au = None\n av = \"audio\" if dltype.startswith(\"da\") else \"video\"\n audio = av == \"audio\"\n filename = _make_fname(song, None, av=av)\n args = (song, filename)\n kwargs = dict(url=None, audio=audio)\n\n try:\n # perform download(s)\n dl_filenames = [args[1]]\n f = _download(*args, **kwargs)\n if f:\n g.message = \"Saved to \" + c.g + f + c.w\n\n if url_au:\n dl_filenames += [args_au[1]]\n _download(*args_au, allow_transcode=False, **kwargs)\n\n except KeyboardInterrupt:\n g.message = c.r + \"Download halted!\" + c.w\n\n try:\n for downloaded in dl_filenames:\n os.remove(downloaded)\n\n except IOError:\n pass\n\n if url_au:\n # multiplex\n mux_cmd = \"APP -i VIDEO -i AUDIO -c copy OUTPUT\".split()\n mux_cmd = \"%s -i %s -i %s -c copy %s\"\n mux_cmd = [g.muxapp, \"-i\", args[1], \"-i\", args_au[1], \"-c\",\n \"copy\", args[1][:-3] + \"mp4\"]\n\n try:\n subprocess.call(mux_cmd)\n g.message = \"Saved to :\" + c.g + mux_cmd[7] + c.w\n os.remove(args[1])\n os.remove(args_au[1])\n\n except KeyboardInterrupt:\n g.message = \"Audio/Video multiplex aborted!\"\n\n g.content = generate_songlist_display()", "def get_yt_link(self, video_id, url=None, skip_errors=False):\n if self._settings.enable_yt_dl and url:\n if not self._yt_dl:\n self._yt_dl = YouTubeDL.get_instance(self._settings, self._callback)\n if not self._yt_dl:\n raise YouTubeException(\"yt-dlp initialization error.\")\n return self._yt_dl.get_yt_link(url, skip_errors)\n\n return self.get_yt_link_by_id(video_id)", "def download_video(video_stream):\n global file_size\n file_size = size_in_mb(video_stream.filesize)\n home_dir = os.environ['HOME']\n path = f'{home_dir}/Downloads/Video'\n print('-'*60)\n print(f'Filename:\\t{video_stream.title}')\n print(f'Location:\\t{path}')\n print(f'Size:\\t\\t{file_size} MB\\n')\n\n filename = video_stream.title + '_video.mp4'\n filename = filename.replace('/', ' ')\n filename = filename.replace('\\\\', ' ')\n\n if os.path.exists(os.path.join(path, filename)):\n print(\"The file has been already downloaded.\")\n sys.exit()\n \n video_stream.download(path, filename)", "def get_video_id(self):\n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n p = urlparse.urlparse(self.youtube_fix_url(self.original_url))\n if p.path == '/watch':\n # Url of type http://www.youtube.com/watch?v=KRaeHxwZvms&feature=g-u-u&context=G2b00124FUAAAAAAAAAA\n #logger.debug('is a watch')\n params = cgi.parse_qs(p.query)\n if 'v' in params:\n return params['v'][0]\n elif p.fragment.startswith('/watch?v='):\n # sample. 
http://m.youtube.com/#/watch?v=ZXkW1-HdRC8\n params = cgi.parse_qs(p.fragment)\n if '/watch?v' in params:\n return params['/watch?v'][0]\n elif p.path.startswith('/v/') or p.path.startswith('/embed/'):\n path = p.path.split('/')\n return path[-1]\n elif p.netloc == 'youtu.be':\n return p.path[1:]\n elif re.match('(.{1}/){3}([\\w+-_^/]+)', p.fragment):\n parts = p.fragment.split('/')\n return parts[-1]\n return ''", "def download_video_url(\n video_url: str,\n pipeline: PipelineContext,\n destination=\"%(title)s.%(ext)s\",\n progress=ProgressMonitor.NULL,\n):\n\n config = pipeline.config\n logger = logging.getLogger(__name__)\n logger.info(\"Starting video download from URL: %s\", video_url)\n\n # Setup progress-tracking\n progress.scale(total_work=1.0)\n progress_tracker = YDLProgressTracker(show_progress_bar=True)\n\n # Resolve destination path template\n output_template = complete_template(config.sources.root, destination)\n logger.info(\"Output template: %s\", output_template)\n\n ydl_opts = {\n \"format\": \"mp4\",\n \"logger\": YDLLogger(logger),\n \"progress_hooks\": [progress_tracker.hook],\n \"outtmpl\": output_template,\n }\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n # Determine destination file name\n video_info = ydl.extract_info(video_url, download=False)\n file_name = ydl.prepare_filename(video_info)\n logger.info(\"Downloading file: %s\", file_name)\n\n # Download file\n with progress_tracker.track(progress):\n ydl.download([video_url])\n\n progress.complete()\n return file_name", "def video_download_chain_task(uuid):\n chain = signature(\n 'wts_worker.worker.video_download',\n kwargs={'uuid': uuid},\n )\n chain |= signature(\n 'wts_worker.worker.video_register_title',\n kwargs={'uuid': uuid},\n )\n return chain.apply_async()", "def get_video_info(self, id, **kwargs):\n kwargs['id'] = id\n return self.get('info/video.json', **kwargs)", "def sync_get_youtube_video(query):\n return yt.get_video_info(query, num_results=1)", "def download_video(video_url, output_path, output_name=\"\", default_type=\"mp4\", verbose=False):\n try:\n if \".\" not in output_name:\n output_name = f\"{output_name}.{default_type}\"\n output_path = os.path.join(output_path, output_name)\n api_response = core.get_request_with_retries(video_url)\n core_utils.print_if_verbose('Processing...', verbose)\n f = open(output_path, 'wb')\n for chunk in api_response.iter_content(chunk_size=255):\n # filter out keep-alive new chunks\n if chunk:\n f.write(chunk)\n core_utils.print_if_verbose(f'The video has been exported here: {output_path}', verbose)\n f.close()\n except Exception as exception_msg:\n print(f\"The video could not be downloaded due to the following error: {exception_msg}\")\n return", "def vid_player(video_url, id=None, width='100%', height='auto'):\n return {'video_url': video_url, 'width': width, 'height': height, 'id': id }", "def get(idd):\n print(f'[{script}]: Collecting data for \"{idd}\"...') if verbosity >= 1 else None\n\n youtube = build(\"youtube\", \"v3\", developerKey=API_KEY)\n request = youtube.videos().list(\n part=\"snippet, contentDetails, statistics\",\n chart=None,\n hl=None,\n id=idd,\n locale=None,\n maxHeight=None,\n maxResults=None,\n maxWidth=None,\n myRating=None,\n onBehalfOfContentOwner=None,\n pageToken=None,\n regionCode=None,\n videoCategoryId=None,\n )\n response = request.execute()\n response = response[\"items\"][0]\n\n data = {\n \"title\": response[\"snippet\"][\"title\"],\n \"idd\": idd,\n \"thumbnail\": 
response[\"snippet\"][\"thumbnails\"][\"default\"][\"url\"],\n \"author\": response[\"snippet\"][\"channelTitle\"],\n \"published\": datetime.strptime(\n response[\"snippet\"][\"publishedAt\"], \"%Y-%m-%dT%H:%M:%S%z\"\n ),\n \"description\": response[\"snippet\"][\"description\"],\n \"duration\": isodate.parse_duration(\n response[\"contentDetails\"][\"duration\"]\n ).total_seconds(),\n \"views\": response[\"statistics\"][\"viewCount\"],\n }\n\n try:\n data[\"rating\"] = int(response[\"statistics\"][\"likeCount\"]) / (\n int(response[\"statistics\"][\"likeCount\"])\n + int(response[\"statistics\"][\"dislikeCount\"])\n )\n except KeyError:\n print(\n f'[{script}]: WARNING: \"{idd}\" does not have ratings enabled.'\n ) if verbosity >= 2 else None\n data[\"rating\"] = -1\n\n try:\n data[\"comment_count\"] = response[\"statistics\"][\"commentCount\"]\n except KeyError:\n print(\n f'[{script}]: WARNING: \"{idd}\" does not have comments enabled.'\n ) if verbosity >= 2 else None\n data[\"comment_count\"] = -1\n\n print(f\"[{script}]: Got video data.\") if verbosity >= 2 else None\n return data", "def download_file(id, output=DATA_DIR, quiet=False):\n url = f\"https://drive.google.com/uc?id={id}\"\n gdown.download(url, output=output, quiet=quiet)", "def download_ostrich_video(download_to_path):\n urlretrieve(REMOTE_OSTRICH_VID_PATH, download_to_path)", "def __ext_embed_id(self, youtube_url):\n youtube_id_match = re.search(r'(?<=v=)[^&#]+', youtube_url)\n youtube_id_match = youtube_id_match or re.search(\n r'(?<=be/)[^&#]+', youtube_url)\n trailer_youtube_id = (youtube_id_match.group(0) if youtube_id_match\n else None)\n return trailer_youtube_id", "def download(track_id, ext):\n\n if ext != 'mp3':\n return Response('', status=404)\n\n track = models.Track.query.get(track_id)\n if track is None:\n abort(404)\n\n track_file = open(track.get_path(), 'r')\n filename_header = (\n 'Content-Disposition', 'attachment; filename=\"%s.mp3\"' % track.title\n )\n\n return Response(response=track_file.read(), mimetype='audio/mpeg',\n headers=[filename_header])", "def web_archive_insert_video(id):\n\n db = get_db()\n user_id = flask.session['user']['id']\n\n video_id = id\n video = yt_get_video(video_id)\n channel_id = video['snippet']['channelId']\n\n archive = None\n for playlist in db_get_archives():\n if playlist['contentDetails']['itemCount'] < 5000:\n archive = playlist\n break\n\n if archive is None:\n archive = yt_create_playlist()\n\n if yt_insert_to_playlist(video_id, archive['id']):\n if channel_id not in db[user_id]:\n db[user_id][channel_id] = {\n 'played': {}, 'archived': {}\n }\n db[user_id][channel_id]['archived'][video_id] = archive['id']\n update_db(db)", "def download_by_link(link: str, videoid: str) -> [str, str]:\n\t# set youtube_dl arguments \n\tydl_opts = {\n\t\t'quiet': False, # don't write in output\n\t\t'no_warnings': True, # write warnings in output\n\t\t'format': \"bestaudio/best\", # download best audio quality\n\t\t'format': 'mp4', # setup format webm\n\t\t'outtmpl': '%(name)s' + str(videoid) + '.%(ext)s', # setup output name \n\t\t'postprocessor': [{ # dk how this need work, but if this not setup audio didn't download\n\t\t\t'key': \"FFmpegExtractAudioPP\",\n\t\t\t'preferredquality': \"512\",\n\t\t }],\n\t}\n\t# start download audio\n\twith youtube_dl.YoutubeDL(ydl_opts) as ydl:\n\t\tdata = ydl.extract_info(link) # exctrat info about audio\n\tfake_name = \"NA\" + str(videoid)\n\t# TODO: think about this query \n\t# refactoring title \n\ttitle = data.pop('title')\n\ttitle = 
re.sub(r'[^\\w]', ' ', title)\n\ttitle = translate(title)\n\ttitle = title.replace(' ', '_')\n\treturn fake_name, title", "def video_detail(request, video_id):\n\n video = get_object_or_404(Video, id=video_id)\n\n context = {\"video\": video}\n\n return render(request, 'video_detail.html', context)", "def download(df_shorter,folderName):\n os.mkdir(str(folderName))\n path = os.getcwd()+'\\\\'+str(folderName)+'\\\\'\n #add column with video link generated from IDs\n df_shorter['urls'] = df_shorter['id'].apply(lambda x: generateLinkFromId(x))\n vid_dl = []\n i = 1\n for url in df_shorter['urls']:\n if url != False:\n name = str(i)+'.mp4'\n vid_dl.append(wget.download(url,path+name))#retrun the path of the saved video\n i = i+1\n return vid_dl", "def generateLinkFromId(videoId):\n page = requests.get('https://www.tiktok.com/embed/v2/'+videoId+'?lang=en')\n tree = html.fromstring(page.content)\n buyers = tree.xpath('//*[@id=\"main\"]/div/div/div[1]/div/div/div/div[2]/div[1]/video/@src')\n if len(buyers) > 0:\n return buyers[0]\n else:\n return False", "def _render_no_tracking(self, video_id):\n you_tube_url = (\n 'https://www.youtube.com/embed/%s'\n '?feature=player_embedded&amp;rel=0') % video_id\n iframe = cElementTree.XML(\"\"\"\n<div class=\"gcb-video-container\">\n <iframe class=\"youtube-player\" title=\"YouTube Video Player\"\n type=\"text/html\" frameborder=\"0\" allowfullscreen=\"allowfullscreen\">\n </iframe>\n</div>\"\"\")\n iframe[0].set('src', you_tube_url)\n return iframe", "def google_youtube_check(id):\n\tif not API_KEY:\n\t\traise ConfigException(\"Require API_KEY for googleapi. Reload after setting.\")\n\td = {\"id\" : quote(id.encode(\"utf-8\")), \"part\" : \"id,status\", \"key\" : API_KEY}\n\t\n\tf = urlopen(YOUTUBE_INFO_URL % (urlencode(d)))\n\tytdata = load(f)\n\tif not ytdata.get(\"items\"): # if there are no items for the ID search, return False\n\t\treturn False\n\treturn True", "def play_video(self):\n\n self.wait.until(self.visible((By.ID, \"video-title\")))\n self.driver.find_element_by_xpath(\"//button[@class='ytp-large-play-button ytp-button']\").click()", "def download_file_from_google_drive(file_id, dest_path, verbose=False):\n\n destination_directory = dirname(dest_path)\n if len(destination_directory) > 0 and not exists(destination_directory):\n makedirs(destination_directory)\n\n session = requests.Session()\n\n if verbose:\n print('Downloading file with Google ID {} into {}... 
'.format(file_id, dest_path), end='')\n stdout.flush()\n\n response = session.get(Constant.DOWNLOAD_URL, params={'id': file_id}, stream=True)\n\n token = get_confirm_token(response)\n if token:\n params = {'id': file_id, 'confirm': token}\n response = session.get(Constant.DOWNLOAD_URL, params=params, stream=True)\n\n save_response_content(response, dest_path)\n if verbose:\n print('Download completed.')", "def select(self, video_id):\n\n query = \"\"\"\n SELECT id, uri, filename, description\n FROM videos\n WHERE id = ?\n \"\"\"\n\n result = Model.execute(query, (video_id,))\n\n return result.fetchone()", "def get_video_id(url):\n\n if not url:\n return \"\"\n\n # If URL is embedded\n if \"embed\" in url:\n return url.split(\"/\")[-1]\n\n parse_result = urlparse(url)\n query = parse_qs(parse_result.query)\n return query[\"v\"][0]", "def playlist_videos(playlist_id):\r\n url = PLAYLIST_ITEMS_URL.format(API_KEY, playlist_id)\r\n response = util.web.http_get(url=url, json=True, referer='https://tinychat.com')\r\n\r\n if response['json'] is not None:\r\n video_list = []\r\n # next_page_token = response['json']['nextPageToken']\r\n try:\r\n if 'items' in response['json']:\r\n for item in response['json']['items']:\r\n video_id = item['snippet']['resourceId']['videoId']\r\n details = video_details(video_id)\r\n if details is not None:\r\n info = {\r\n 'type': 'youTube',\r\n 'video_id': video_id,\r\n 'video_title': details['video_title'],\r\n 'video_time': details['video_time']\r\n }\r\n video_list.append(info)\r\n return video_list\r\n except KeyError as ke:\r\n log.error(ke, exc_info=True)\r\n return None", "def upload_youtube(msg):\n url = \"https://www.youtube.com/watch?v=%s\" % msg['yt_videoid']\n fname = \"%s/download_file/%s.mp4\" % (TOP_LEVEL_DIR, randint(1, 1000000000))\n cmd = \"/home/hadn/py4code/bin/youtube-dl -o %s %s\" % (fname, url)\n cmd = shlex.split(cmd)\n up = Popen(cmd, stdout=PIPE)\n temp = up.communicate()\n\n cmd_upload = \"/home/hadn/py4code/bin/python %s/flask_app/crawler/upload_youtube.py --file %s --title '%s'\" % (\n TOP_LEVEL_DIR, fname, msg['title'])\n cmd_upload = shlex.split(cmd_upload)\n up_youtube = Popen(cmd_upload, stdout=PIPE)\n temp_upload = up_youtube.communicate()\n\n print(temp_upload)\n return msg", "def download_subtitles(video_ids):\n print(f\"[downloading subs] {video_ids}\")\n ydl_opts = {\n 'skip_download': True,\n 'writeautomaticsub': True,\n 'outtmpl': 'subtitles/%(id)s',\n 'subtitleslangs': ['en']\n }\n for video_id in video_ids:\n filename = f\"subtitles/{video_id}.en.vtt\"\n # Don't download if these subtitles are already downloaded\n # or if we know there are no subtitles on the video\n if video_id in cached_no_subtitles:\n continue\n if not os.path.isfile(filename):\n print(f\"[downloading subtitle] {video_id}\")\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n # assume downloaded subtitle is correct\n ydl.download([video_id])\n # Delay to be nice to the server\n time.sleep(2)\n try:\n with open(filename) as f:\n yield video_id, f.read()\n except IOError:\n with open(CACHED_NO_SUBTITLES_FILE, \"a\") as f:\n f.write(video_id + '\\n')\n cached_no_subtitles.add(video_id)\n yield video_id, None", "def get_embed_youtube(link = None):\n assert(link != None)\n assert(link != \"\")\n log.debug( \"preparsed link: \" + link)\n video_id = \"\"\n try:\n # break the link\n choppedLink = link.split(\"/\")\n if choppedLink[2].find(\"youtu.be\") >= 0:\n # Parse short link getting only last piece\n video_id = get_id_shortlink(choppedLink)\n elif 
choppedLink[3].find(\"attribution_link\") >= 0 :\n # Its an attribution link, a bit special\n video_id = get_id_attribution(choppedLink)\n else:\n # This should be a regular link\n video_id = get_id_regular_link(choppedLink)\n\n # and finally compose the embed link\n flink = compose_embed_youtube(video_id)\n log.debug( \"compound link: \" + flink)\n except Exception as e:\n log.error(\"Something weird happened when ending getting embed youtube\")\n log.exception(e)\n raise NotImplementedError( \"We are still working on links like \" + link)\n\n return flink", "def get_videos_in_playlist(self):\n\n self.ydl = youtube_dl.YoutubeDL()\n # uses the youtube_dl as a context manager\n with self.ydl:\n self.result = self.ydl.extract_info(\n self.url, extra_info={'listformats': True}, download=False)\n for video in (self. result['entries']):\n video_id = video['id']\n self. url = f'https://www.youtube.com/watch?v={video_id}'\n self. show_formats()", "def download_flickr_video(url, save_root, force_overwrite):\n\n # (Try to) open the URL\n response = urlopen(url)\n # Extract the file extension from the resolved URL\n m = re.match(r'(.*)\\?s=.*', response.url)\n _, ext = os.path.splitext(m.group(1))\n # Build the path to save the video to\n video_meta = parse.parse(VIDEO_URL_FORMAT, url)\n user_id = video_meta['user_id']\n video_id = video_meta['video_id']\n save_path = os.path.join(save_root, f'{user_id}-{video_id}{ext}')\n # Save the video\n if os.path.isfile(save_path) and not force_overwrite:\n raise FileExistsError(f'File already exists at {save_path}')\n else:\n with open(save_path, 'wb') as f:\n shutil.copyfileobj(response, f)\n\n return save_path", "def get_youtube_handler():\n options = {}\n home = os.path.expanduser(\"~\")\n default_credentials = os.path.join(home, \".youtube-upload-credentials.json\")\n #client_secrets = options.client_secrets or os.path.join(home, \".client_secrets.json\")\n #credentials = options.credentials_file or default_credentials\n client_secrets = os.path.join(home, \".client_secrets.json\")\n credentials = default_credentials \n debug(\"Using client secrets: {0}\".format(client_secrets))\n debug(\"Using credentials file: {0}\".format(credentials))\n #get_code_callback = (auth.browser.get_code\n #if options.auth_browser else auth.console.get_code)\n get_code_callback = auth.browser.get_code\n return auth.get_resource(client_secrets, credentials,\n get_code_callback=get_code_callback)", "def __construct_url_from_id(_video_id):\n return f\"{core.get_base_url(api_base=False)}/videos/{_video_id}\"", "def download(self, language, filename, filetype):\n if language not in self.languages.keys():\n print \"Theres's no subtitle in this language\"\n sys.exit()\n url = \"http://www.youtube.com/api/timedtext?v={0}&lang={1}\".format(self.video_id, language)\n self.subtitle = urllib2.urlopen(url)\n if filetype == \"srt\":\n self.writeSRTFile(filename)\n else:\n self.writeXMLFile(filename)", "def fetch_pyvideo_pk(self):\n url = 'http://pyvideo.org/search?models=videos.video&q={0}'.format(self.full_name.replace(\" \", \"+\"))\n soup = BeautifulSoup(requests.get(url).content).findAll(\"a\")\n if soup:\n for link in soup:\n if link.string == self.full_name:\n self.pyvideo_pk = link.get('href').split('/')[2]\n self.save()\n return self.pyvideo_pk\n self.pyvideo_pk = None\n self.save()\n return None", "def add_video_to_playlist(youtube, args, privacy=\"public\"):\n video_id = args['video_id']\n playlist_id = args['playlist_id']\n \n print(video_id)\n #print(type(args))\n \n if 
playlist_id:\n return add_video_to_existing_playlist(youtube, playlist_id, video_id)\n else:\n lib.debug(\"Error adding video to playlist\")", "async def video_url(cls, url, ytdl, *, loop=None, stream=False):\n loop = loop or asyncio.get_event_loop()\n data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=not stream))\n song_list = {'queue': []}\n if 'entries' in data:\n if len(data['entries']) > 1:\n playlist_titles = [title['title'] for title in data['entries']]\n song_list = {'queue': playlist_titles}\n song_list['queue'].pop(0)\n\n data = data['entries'][0]\n\n filename = data['url'] if stream else ytdl.prepare_filename(data)\n return cls(discord.FFmpegPCMAudio(filename, **ffmpeg_options), data=data), song_list", "def fetch_trailer(self, movie_id):\n movie = tmdbsimple.Movies(movie_id)\n request = movie.videos()\n trailer = movie.results[0]['key']\n\n return trailer", "def video_details(video_id, check=True):\r\n url = VIDEO_DETAILS_URL.format(API_KEY, video_id)\r\n response = util.web.http_get(url=url, json=True, referer='https://tinychat.com')\r\n\r\n if response['json'] is not None:\r\n try:\r\n if 'items' in response['json']:\r\n if len(response['json']['items']) is not 0:\r\n contentdetails = response['json']['items'][0]['contentDetails']\r\n if check:\r\n if 'regionRestriction' in contentdetails:\r\n if 'blocked' in contentdetails['regionRestriction']:\r\n blocked = contentdetails['regionRestriction']['blocked']\r\n if [i for e in ALLOWED_COUNTRIES for i in blocked if e in i]:\r\n log.info('%s is blocked in: %s' %\r\n (video_id, blocked))\r\n return None\r\n if 'allowed' in contentdetails['regionRestriction']:\r\n allowed = contentdetails['regionRestriction']['allowed']\r\n if [i for e in ALLOWED_COUNTRIES for i in allowed if e not in i]:\r\n log.info('%s is allowed in: %s' %\r\n (video_id, allowed))\r\n return None\r\n video_time = util.string_util.convert_to_millisecond(contentdetails['duration'])\r\n video_title = response['json']['items'][0]['snippet']['title'].encode('ascii', 'ignore')\r\n\r\n return {\r\n 'type': 'youTube',\r\n 'video_id': video_id,\r\n 'video_time': video_time,\r\n 'video_title': video_title\r\n }\r\n return None\r\n except KeyError as ke:\r\n log.error(ke, exc_info=True)\r\n return None", "def download_videos(blink, save_dir=\"/media\"):\n blink.download_videos(save_dir, since=get_date())", "def download(self, download_id):\r\n return downloads.Downloads(self, download_id)", "def request_id(self):\n select_id = input(\"\\n>>> \")\n select_dict = [format for format in self.result['formats']\n if format['format_id'] == select_id][0]\n filesize = size(select_dict['filesize']\n ) if select_dict['filesize'] else 0\n # url = select_dict['url']\n print(f\"Downloading {self.result['title']}, size={filesize}\")\n self.title = self.result['title']\n for item in [\"(\", \")\", \" \", \",\", \".\", \"'\"]:\n self.title = self.title.replace(item, '_')\n self.title = self.title.replace('__', '_')\n self.download_video(select_id)", "def get_video_id(self, obj):\n return obj.video.id", "def get_video_id(self):\n \n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n #logger.debug('DAILYMOTION VIDEO FOUND %s' % url)\n \n p = urlparse.urlparse(self.original_url)\n path = p.path\n if path.endswith('/'):\n path = path[:-1]\n path_list = path[1:].split('/')\n \n if len(path_list) == 3 and (p.path.startswith('/embed/video/') or p.path.startswith('/swf/video/')):\n # http://www.dailymotion.com/embed/video/xmp7zw\n 
return re.sub('_.+', '', path_list[2])\n elif len(path_list) == 2 and (p.path.startswith('/video/') or p.path.startswith('/swf/')):\n # http://www.dailymotion.com/video/xmp7zw_whatever\n # http://www.dailymotion.com/swf/xmp7zw\n return re.sub('_.+', '', path_list[1])\n \n return ''", "def play_trailer(self):\n webbrowser.open(self.youtube_trailer_url)" ]
[ "0.7745084", "0.7303579", "0.7203833", "0.71133363", "0.70373094", "0.7022766", "0.69767386", "0.6973098", "0.69691336", "0.69578743", "0.69145817", "0.6872059", "0.68037355", "0.6796082", "0.6795651", "0.67516667", "0.6571322", "0.6560047", "0.65302217", "0.65141547", "0.6513327", "0.6511696", "0.6491822", "0.64788663", "0.6476651", "0.6446605", "0.6435045", "0.64282376", "0.64242804", "0.6374236", "0.6368706", "0.6368076", "0.63553596", "0.6344511", "0.63122314", "0.62956583", "0.6284835", "0.62543577", "0.62126166", "0.6195221", "0.61944854", "0.6191643", "0.61824924", "0.61666745", "0.61501634", "0.61476386", "0.6135256", "0.6134161", "0.61200297", "0.61105704", "0.60876447", "0.60781586", "0.60530466", "0.5987342", "0.5985662", "0.5981774", "0.597452", "0.5970539", "0.59704703", "0.5931067", "0.59295976", "0.59284186", "0.5903014", "0.59006155", "0.5861927", "0.5830635", "0.5824812", "0.5824054", "0.5804496", "0.57983035", "0.5793523", "0.57847166", "0.57837677", "0.5754795", "0.575012", "0.57391614", "0.572212", "0.57114565", "0.5706584", "0.56954604", "0.56878084", "0.5687131", "0.568461", "0.5680392", "0.56731844", "0.5671808", "0.5665233", "0.5664644", "0.56617934", "0.56608164", "0.5658029", "0.56576025", "0.56148094", "0.5611423", "0.5610124", "0.5608605", "0.5607025", "0.5593614", "0.55868816", "0.55760217" ]
0.7562303
1
Downloads YouTube videos by URL.
def youtube_download(url, output_dir='.', merge=True, info_only=False):
    id = match1(url, r'youtu.be/([^/]+)') or parse_query_param(url, 'v')
    assert id
    youtube_download_by_id(id, title=None, output_dir=output_dir, merge=merge, info_only=info_only)
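For illustration only — a self-contained sketch of the video-id extraction step that the function above delegates to match1 and parse_query_param (helpers defined elsewhere in the source project, not shown here). The extract_video_id helper and the sample URLs are assumptions added for this sketch, not part of the original code.

# Sketch of the id-extraction idea using only the standard library.
import re
from urllib.parse import urlparse, parse_qs

def extract_video_id(url: str) -> str:
    # Short links: https://youtu.be/<id>
    m = re.search(r'youtu\.be/([^/?]+)', url)
    if m:
        return m.group(1)
    # Watch links: https://www.youtube.com/watch?v=<id>
    qs = parse_qs(urlparse(url).query)
    return qs.get('v', [''])[0]

assert extract_video_id('https://youtu.be/abc123XYZ_-') == 'abc123XYZ_-'
assert extract_video_id('https://www.youtube.com/watch?v=abc123XYZ_-&t=42') == 'abc123XYZ_-'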
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_video(self, url):\n yt = YouTube(url)\n yt_filtered = yt.streams.filter(progressive=True, file_extension=\"mp4\")\n yt_resolutions = yt_filtered.order_by(\"resolution\")\n\n # Downloads the first video that fits the description\n video = yt_resolutions.desc().first()\n video.download()\n\n # Returns the filename\n return video.default_filename", "def download_from_youtube():\n linkinput = input(\"Enter the url you want to download: \")\n youtube_object = Youtube(linkinput)\n youtube_object.youtube()", "def download_video_data(self):\n\n def scrape_url(url):\n \"\"\"Scrape the video list, youtube_dl does all the heavy lifting\"\"\"\n ydl_opts = {\n \"ignoreerrors\": True, # Skip private and unavaliable videos\n }\n\n ydl = youtube_dl.YoutubeDL(ydl_opts)\n\n with ydl:\n result_ydl = ydl.extract_info(\n url,\n download=False # No download needed, only the info\n )\n\n logger.debug('Url scraped {}', url)\n if 'entries' in result_ydl:\n # It's a playlist or a list of videos\n return result_ydl['entries']\n # Just a video\n return [result_ydl]\n\n youtube_list = sum((scrape_url(url) for url in self.youtube_lists), [])\n for youtube_video_data in youtube_list:\n if youtube_video_data: # Valid video\n self.youtube_videos.append(\n Video.from_youtube(\n video_data=youtube_video_data, event=self))\n else:\n logger.warning('Null youtube video')", "def download(self, url=None):\n if url is None:\n if self.results is None:\n raise ValueError(\"Please specify a valid url.\")\n else:\n url = self.results[0]\n try:\n meta = pafy.new(url)\n except Exception:\n raise IOError(\"Video not available for download.\")\n\n vid = meta.getbest()\n path = vid.download()\n self.videos.append(path)\n return path", "def download_videos(download_limit=6):\n videos = []\n for fname in os.listdir('yt_api_data'):\n videos += load_video_data(fname)\n vids_downloaded = 0\n excluded_vids = get_excluded_videos()\n for video_id, title in videos:\n if download_limit != 'all' and vids_downloaded == download_limit:\n break\n title = title.replace(' ', '_')\n mkv_path = \"videos/\" + title + \".mkv\"\n mp4_path = \"videos/\" + title + \".mp4\"\n download_fpath = \"videos/\" + title\n if not check_excluded_list(excluded_vids, title) and not os.path.isfile(mkv_path) and not os.path.isfile(mp4_path):\n print(colored(str(vids_downloaded + 1) + \": \", \"yellow\") + colored(video_id + \" downloading: \" + download_fpath, \"green\"))\n command_prefix = \"youtube-dl -o \" + download_fpath\n if video_id[0] == '-': \n os.system(command_prefix + \" -- \" + video_id)\n else:\n os.system(command_prefix + \" \" + video_id)\n vids_downloaded += 1\n else:\n print(colored(\"skipping download: \" + title + \"with youtube_id: \" + video_id, \"yellow\"))", "def download(target_url):\n program_location = sys.executable\n program_name = \"youtube-dl.exe\"\n # Define arguments. 
see this url for help\n # https://github.com/rg3/youtube-dl\n ignore_errors = \"-i\"\n safe_filenames = \"--restrict-filenames\"\n output_arg = \"-o\"\n output_template = \"download\\%(uploader)s\\%(playlist)s\\%(title)s-%(id)s.%(ext)s\"\n command = [program_name, ignore_errors, safe_filenames, output_arg, output_template, target_url]\n result = subprocess.call(command)\n print \"Command result: \", result", "def scrape_url(url):\n ydl_opts = {\n \"ignoreerrors\": True, # Skip private and unavaliable videos\n }\n\n ydl = youtube_dl.YoutubeDL(ydl_opts)\n\n with ydl:\n result_ydl = ydl.extract_info(\n url,\n download=False # No download needed, only the info\n )\n\n logger.debug('Url scraped {}', url)\n if 'entries' in result_ydl:\n # It's a playlist or a list of videos\n return result_ydl['entries']\n # Just a video\n return [result_ydl]", "def fetch_youtube_url(page_url):\n video_soup = Soup(requests.get(page_url).content)\n for anchor in video_soup('a'):\n if not anchor.has_key('href'):\n continue\n if 'youtube' in anchor['href']:\n return anchor['href']", "async def download_video(v_url):\n reply = await v_url.get_reply_message()\n if v_url.pattern_match.group(2) != \"\":\n url = v_url.pattern_match.group(2)\n elif reply is not None:\n url = reply.message\n url = re.findall(r\"\\bhttps?://.*\\.\\S+\", reply.message)[0]\n else:\n return\n type = (\n v_url.pattern_match.group(1).lower()\n if v_url.pattern_match.group(1) is not None\n else \"a\"\n )\n await v_url.edit(\"`Preparing to download...`\")\n out_folder = Config.TMP_DOWNLOAD_DIRECTORY + \"youtubedl/\"\n Config.TMP_DOWNLOAD_DIRECTORY + \"/thumb_image.jpg\"\n if not os.path.isdir(out_folder):\n os.makedirs(out_folder)\n if type == \"a\":\n opts = {\n \"format\": \"bestaudio\",\n \"addmetadata\": True,\n \"noplaylist\": False,\n \"key\": \"FFmpegMetadata\",\n \"writethumbnail\": True,\n \"embedthumbnail\": True,\n \"prefer_ffmpeg\": True,\n \"geo_bypass\": True,\n \"nocheckcertificate\": True,\n \"postprocessors\": [\n {\n \"key\": \"FFmpegExtractAudio\",\n \"preferredcodec\": \"mp3\",\n \"preferredquality\": \"320\",\n }\n ],\n \"outtmpl\": out_folder + \"%(title)s.%(ext)s\",\n \"quiet\": True,\n \"logtostderr\": False,\n }\n video = False\n song = True\n\n elif type == \"v\":\n opts = {\n \"format\": \"best\",\n \"addmetadata\": True,\n \"noplaylist\": False,\n \"getthumbnail\": True,\n \"embedthumbnail\": True,\n \"xattrs\": True,\n \"writethumbnail\": True,\n \"key\": \"FFmpegMetadata\",\n \"prefer_ffmpeg\": True,\n \"geo_bypass\": True,\n \"nocheckcertificate\": True,\n \"postprocessors\": [\n {\"key\": \"FFmpegVideoConvertor\", \"preferedformat\": \"mp4\"},\n ],\n \"outtmpl\": out_folder + \"%(title)s.%(ext)s\",\n \"logtostderr\": False,\n \"quiet\": True,\n }\n song = False\n video = True\n\n try:\n await v_url.edit(\"`Fetching playlist data, please wait..`\")\n with YoutubeDL(opts) as ytdl:\n ytdl.extract_info(url)\n # print(ytdl_data['thumbnail'])\n filename = sorted(get_lst_of_files(out_folder, []))\n except DownloadError as DE:\n await v_url.edit(f\"`{str(DE)}`\")\n return\n except ContentTooShortError:\n await v_url.edit(\"`The download content was too short.`\")\n return\n except GeoRestrictedError:\n await v_url.edit(\n \"`Video is not available from your geographic location due to geographic restrictions imposed by a website.`\"\n )\n return\n except MaxDownloadsReached:\n await v_url.edit(\"`Max-downloads limit has been reached.`\")\n return\n except PostProcessingError:\n await v_url.edit(\"`There was an error during 
post processing.`\")\n return\n except UnavailableVideoError:\n await v_url.edit(\"`Media is not available in the requested format.`\")\n return\n except XAttrMetadataError as XAME:\n await v_url.edit(f\"`{XAME.code}: {XAME.msg}\\n{XAME.reason}`\")\n return\n except ExtractorError:\n await v_url.edit(\"`There was an error during info extraction.`\")\n return\n except Exception as e:\n await v_url.edit(f\"{str(type(e)): {str(e)}}\")\n return\n c_time = time.time()\n await v_url.edit(\"`YouTube Playlist Downloading Processing Now.\\nPlease Wait!`\")\n if song:\n for single_file in filename:\n if os.path.exists(single_file):\n caption_rts = os.path.basename(single_file)\n force_document = True\n supports_streaming = False\n document_attributes = []\n if single_file.endswith((\".mp4\", \".mp3\", \".flac\", \".webm\")):\n metadata = extractMetadata(createParser(single_file))\n duration = 0\n width = 0\n height = 180\n if metadata.has(\"duration\"):\n duration = metadata.get(\"duration\").seconds\n document_attributes = [\n DocumentAttributeVideo(\n duration=duration,\n w=width,\n h=height,\n round_message=False,\n supports_streaming=True,\n )\n ]\n try:\n ytdl_data_name_audio = os.path.basename(single_file)\n thumb = (\n out_folder\n + ytdl_data_name_audio[: (len(ytdl_data_name_audio) - 4)]\n + \".jpg\"\n )\n print(ytdl_data_name_audio)\n file_path = single_file\n song_size = file_size(file_path)\n await v_url.client.send_file(\n v_url.chat_id,\n single_file,\n caption=f\"`{ytdl_data_name_audio}`\"\n + \"\\n\"\n + f\"Size👉 {song_size}\",\n force_document=force_document,\n supports_streaming=supports_streaming,\n allow_cache=False,\n thumb=thumb,\n reply_to=v_url.message.id,\n attributes=document_attributes,\n progress_callback=lambda d, t: asyncio.get_event_loop().create_task(\n progress(\n d,\n t,\n v_url,\n c_time,\n \"Uploading..\",\n f\"{ytdl_data_name_audio}\",\n )\n ),\n )\n # os.remove(thumb)\n except Exception as e:\n await v_url.client.send_message(\n v_url.chat_id,\n \"{} caused `{}`\".format(caption_rts, str(e)),\n )\n continue\n os.remove(single_file)\n await asyncio.sleep(DELETE_TIMEOUT)\n # await v_url.delete()\n shutil.rmtree(out_folder)\n if video:\n for single_file in filename:\n if os.path.exists(single_file):\n caption_rts = os.path.basename(single_file)\n force_document = False\n supports_streaming = True\n document_attributes = []\n if single_file.endswith((\".mp4\", \".mp3\", \".flac\", \".webm\")):\n metadata = extractMetadata(createParser(single_file))\n duration = 0\n width = 0\n height = 0\n if metadata.has(\"duration\"):\n duration = metadata.get(\"duration\").seconds\n document_attributes = [\n DocumentAttributeVideo(\n duration=duration,\n w=width,\n h=height,\n round_message=False,\n supports_streaming=True,\n )\n ]\n # print(ytdl_data)\n # for file in os.listdir(\"./DOWNLOADS/youtubedl/\"):\n # if file.endswith(\".jpg\"):\n # thumb = \"./DOWNLOADS/youtubedl/\" + file\n # print(os.path.join(\"./DOWNLOADS/youtubedl/\", file))\n # image_link = ytdl_data['thumbnail']\n # downloaded_image = wget.download(image_link,out_folder)\n # thumb = ytdl_data_name_video + \".jpg\"\n file_path = single_file\n video_size = file_size(file_path)\n try:\n ytdl_data_name_video = os.path.basename(single_file)\n thumb = (\n out_folder\n + ytdl_data_name_video[: (len(ytdl_data_name_video) - 4)]\n + \".jpg\"\n )\n await v_url.client.send_file(\n v_url.chat_id,\n single_file,\n caption=f\"`{ytdl_data_name_video}`\"\n + \"\\n\"\n + f\"Size👉 {video_size}\",\n force_document=force_document,\n 
supports_streaming=supports_streaming,\n thumb=thumb,\n allow_cache=False,\n reply_to=v_url.message.id,\n attributes=document_attributes,\n progress_callback=lambda d, t: asyncio.get_event_loop().create_task(\n progress(\n d,\n t,\n v_url,\n c_time,\n \"Uploading..\",\n f\"{ytdl_data_name_video}\",\n )\n ),\n )\n # os.remove(thumb)\n except Exception as e:\n await v_url.client.send_message(\n v_url.chat_id,\n \"{} caused `{}`\".format(caption_rts, str(e)),\n )\n continue\n os.remove(single_file)\n await asyncio.sleep(DELETE_TIMEOUT)\n # await v_url.delete()\n shutil.rmtree(out_folder)", "async def youtube(self, ctx, *args):\n if not args:\n await ctx.send(\"usage: `>youtube [search string]`\")\n return\n search_string = \" \".join(args)\n search_string = urllib.parse.urlencode({'search_query': search_string})\n response = requests.get('http://www.youtube.com/results?' + search_string + \"&hl=en_US&app=desktop\")\n if response.status_code == 200:\n search_results = re.findall('href=\\\\\"\\\\/watch\\\\?v=(.{11})', response.content.decode())\n try:\n first_result_url = 'http://www.youtube.com/watch?v=' + search_results[0]\n except IndexError:\n with open('downloads/yt_dump.txt', 'w') as f:\n f.write(response.content.decode())\n #print(response.is_redirect)\n return await ctx.send(\"Found nothing!\")\n await ctx.send(first_result_url)\n self.logger.info(misolog.format_log(ctx, f\"{first_result_url}\"))\n else:\n await ctx.send(\"Error: status code \" + str(response.status_code))\n self.logger.info(misolog.format_log(ctx, f\"error{response.status_code}\"))", "def get_yt_video(yt_url):\n\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }],\n 'outtmpl': '%(id)s.%(ext)s'\n }\n ydl = youtube_dl.YoutubeDL(ydl_opts)\n\n with ydl:\n result = ydl.extract_info(\n #'http://www.youtube.com/watch?v=BaW_jenozKc',\n yt_url,\n download=True # We just want to extract the info\n )\n\n if 'entries' in result:\n # Can be a playlist or a list of videos\n video = result['entries'][0]\n else:\n # Just a video\n video = result\n\n return video", "def download(idd, path):\n print(f'[{script}]: Downloading YT video \"{idd}\"...') if verbosity >= 1 else None\n\n try:\n yt = pytube.YouTube(\"https://www.youtube.com/watch?v=\" + idd)\n stream = yt.streams.filter(progressive=True).first()\n stream.download(path, filename=idd)\n except Exception:\n print(f'[{script}]: Failed download of YT video \"{idd}\".')\n return None\n\n data = {\n \"idd\": idd,\n \"abr\": stream.abr,\n \"acodec\": stream.audio_codec,\n \"bitrate\": stream.bitrate,\n \"codecs\": stream.codecs,\n \"fps\": stream.fps,\n \"mime\": stream.mime_type,\n \"res\": stream.resolution,\n \"vcodec\": stream.video_codec,\n \"size\": stream._filesize,\n \"frames\": stream.fps * yt.length,\n }\n\n file_path = path + \"/\" + data[\"idd\"] + \".mp4\"\n print(\n f'[{script}]: Download successful. 
Saved to \"{file_path}\".'\n ) if verbosity >= 2 else None\n return data", "def youtube_download_by_id(id, title=None, output_dir='.', merge=True, info_only=False):\n \n raw_video_info = get_content('http://www.youtube.com/get_video_info?video_id=%s' % id)\n video_info = parse.parse_qs(raw_video_info)\n \n if video_info['status'] == ['ok'] and ('use_cipher_signature' not in video_info or video_info['use_cipher_signature'] == ['False']):\n title = parse.unquote_plus(video_info['title'][0])\n stream_list = parse.parse_qs(raw_video_info)['url_encoded_fmt_stream_map'][0].split(',')\n \n else:\n # Parse video page when video_info is not usable.\n video_page = get_content('http://www.youtube.com/watch?v=%s' % id)\n ytplayer_config = json.loads(match1(video_page, r'ytplayer.config\\s*=\\s*([^\\n]+);'))\n \n title = ytplayer_config['args']['title']\n stream_list = ytplayer_config['args']['url_encoded_fmt_stream_map'].split(',')\n \n streams = {\n parse.parse_qs(stream)['itag'][0] : parse.parse_qs(stream)\n for stream in stream_list\n }\n \n for codec in yt_codecs:\n itag = str(codec['itag'])\n if itag in streams:\n download_stream = streams[itag]\n break\n \n url = download_stream['url'][0]\n if 'sig' in download_stream:\n sig = download_stream['sig'][0]\n else:\n sig = decrypt_signature(download_stream['s'][0])\n url = '%s&signature=%s' % (url, sig)\n \n type, ext, size = url_info(url)\n \n print_info(site_info, title, type, size)\n if not info_only:\n download_urls([url], title, ext, size, output_dir, merge = merge)", "def get_videos_in_playlist(self):\n\n self.ydl = youtube_dl.YoutubeDL()\n # uses the youtube_dl as a context manager\n with self.ydl:\n self.result = self.ydl.extract_info(\n self.url, extra_info={'listformats': True}, download=False)\n for video in (self. result['entries']):\n video_id = video['id']\n self. url = f'https://www.youtube.com/watch?v={video_id}'\n self. 
show_formats()", "def youtube_dl_latest(args=None):\n args = parse_youtube_dl_arguments(args=args)\n download_videos(channels_file=args.channels_file, hierarchy=args.hierarchy)", "def download_video(self, file_path, video_url, video_creation_time):\r\n logger.debug(\"Downloading video created at \" + _format_timestamp_iso(self.tz, video_creation_time) + \" from \"\r\n + video_url + \" to \" + file_path)\r\n failed = False\r\n try:\r\n self._download_with_api(file_path, video_url)\r\n except Exception as e:\r\n logger.debug(\"Video download failed using TikTokApi: \" + str(e))\r\n failed = True\r\n if not os.path.isfile(file_path):\r\n failed = True\r\n logger.debug(\"No file was created by TikTokApi at \" + file_path)\r\n elif os.stat(file_path).st_size < 1024:\r\n failed = True\r\n try:\r\n os.remove(file_path)\r\n logger.debug(\"Deleted malformed TikTokApi download at \" + file_path)\r\n except Exception as ee:\r\n logger.error(\"Unable to delete malformed TikTokApi download at \" + str(ee))\r\n if failed:\r\n sleep_time = random.uniform(self.sleep_min, self.sleep_max)\r\n logger.info(\"Sleeping for: \" + str(sleep_time) + \" seconds\")\r\n sleep(sleep_time)\r\n try:\r\n logger.debug(\"Falling back to YouTube-dl\")\r\n self.fallback_counter += 1\r\n self._download_with_ytdl(file_path, video_url)\r\n if not os.path.isfile(file_path):\r\n raise AssertionError(\"No file was created by YouTube-dl at \" + file_path)\r\n elif os.stat(file_path).st_size < 1024:\r\n try:\r\n os.remove(file_path)\r\n logger.debug(\"Deleted malformed YouTube-dl download at \" + file_path)\r\n except Exception as ee:\r\n raise AssertionError(\"Malformed file was created at \" + file_path +\r\n \" and could not be removed: \" + str(ee))\r\n raise AssertionError(\"Malformed file was created at \" + file_path + \" and was removed\")\r\n failed = False\r\n except youtube_dl.utils.DownloadError as ee:\r\n logger.error(\"YouTube-dl DownloadError: \" + str(ee))\r\n self.ytdl_downloaderror_counter += 1\r\n failed = True\r\n except Exception as ee:\r\n logger.error(\"Video download failed with YouTube-dl: \" + str(ee))\r\n self.other_error_counter += 1\r\n failed = True\r\n if not failed:\r\n try:\r\n os.utime(file_path, (video_creation_time, video_creation_time))\r\n except Exception as e:\r\n logger.debug(\"Unable to set utime of \" + str(video_creation_time) + \" on file \" + file_path +\r\n \", Error: \" + str(e))\r\n return True\r\n return False", "def download(dltype, num):\n # This function needs refactoring!\n # pylint: disable=R0912\n # pylint: disable=R0914\n if g.browse_mode == \"ytpl\" and dltype in (\"da\", \"dv\"):\n plid = g.ytpls[int(num) - 1][\"link\"]\n down_plist(dltype, plid)\n return\n\n elif g.browse_mode == \"ytpl\":\n g.message = \"Use da or dv to specify audio / video playlist download\"\n g.message = c.y + g.message + c.w\n g.content = generate_songlist_display()\n return\n\n elif g.browse_mode != \"normal\":\n g.message = \"Download must refer to a specific video item\"\n g.message = c.y + g.message + c.w\n g.content = generate_songlist_display()\n return\n\n writestatus(\"Fetching video info...\")\n song = (g.model.songs[int(num) - 1])\n best = dltype.startswith(\"dv\") or dltype.startswith(\"da\")\n\n if not best:\n\n try:\n # user prompt for download stream\n url, ext, url_au, ext_au = prompt_dl(song)\n\n except KeyboardInterrupt:\n g.message = c.r + \"Download aborted!\" + c.w\n g.content = generate_songlist_display()\n return\n\n if not url or ext_au == \"abort\":\n # abort on invalid stream 
selection\n g.content = generate_songlist_display()\n g.message = \"%sNo download selected / invalid input%s\" % (c.y, c.w)\n return\n\n else:\n # download user selected stream(s)\n filename = _make_fname(song, ext)\n args = (song, filename, url)\n\n if url_au and ext_au:\n # downloading video and audio stream for muxing\n audio = False\n filename_au = _make_fname(song, ext_au)\n args_au = (song, filename_au, url_au)\n\n else:\n audio = ext in (\"m4a\", \"ogg\")\n\n kwargs = dict(audio=audio)\n\n elif best:\n # set updownload without prompt\n url_au = None\n av = \"audio\" if dltype.startswith(\"da\") else \"video\"\n audio = av == \"audio\"\n filename = _make_fname(song, None, av=av)\n args = (song, filename)\n kwargs = dict(url=None, audio=audio)\n\n try:\n # perform download(s)\n dl_filenames = [args[1]]\n f = _download(*args, **kwargs)\n if f:\n g.message = \"Saved to \" + c.g + f + c.w\n\n if url_au:\n dl_filenames += [args_au[1]]\n _download(*args_au, allow_transcode=False, **kwargs)\n\n except KeyboardInterrupt:\n g.message = c.r + \"Download halted!\" + c.w\n\n try:\n for downloaded in dl_filenames:\n os.remove(downloaded)\n\n except IOError:\n pass\n\n if url_au:\n # multiplex\n mux_cmd = \"APP -i VIDEO -i AUDIO -c copy OUTPUT\".split()\n mux_cmd = \"%s -i %s -i %s -c copy %s\"\n mux_cmd = [g.muxapp, \"-i\", args[1], \"-i\", args_au[1], \"-c\",\n \"copy\", args[1][:-3] + \"mp4\"]\n\n try:\n subprocess.call(mux_cmd)\n g.message = \"Saved to :\" + c.g + mux_cmd[7] + c.w\n os.remove(args[1])\n os.remove(args_au[1])\n\n except KeyboardInterrupt:\n g.message = \"Audio/Video multiplex aborted!\"\n\n g.content = generate_songlist_display()", "async def video_url(cls, url, ytdl, *, loop=None, stream=False):\n loop = loop or asyncio.get_event_loop()\n data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=not stream))\n song_list = {'queue': []}\n if 'entries' in data:\n if len(data['entries']) > 1:\n playlist_titles = [title['title'] for title in data['entries']]\n song_list = {'queue': playlist_titles}\n song_list['queue'].pop(0)\n\n data = data['entries'][0]\n\n filename = data['url'] if stream else ytdl.prepare_filename(data)\n return cls(discord.FFmpegPCMAudio(filename, **ffmpeg_options), data=data), song_list", "def download_all(self):\r\n download_path = os.path.join(self.download_path, self.username)\r\n already_downloaded = []\r\n successful_downloads = []\r\n failed_downloads = []\r\n if not os.path.exists(download_path):\r\n os.makedirs(download_path)\r\n elif not os.path.isdir(download_path):\r\n raise NotADirectoryError(\"Download path is not a directory: \" + download_path)\r\n elif self.skip_downloaded:\r\n for item in os.listdir(download_path):\r\n file_path = str(os.path.join(download_path, item))\r\n if os.path.isfile(file_path):\r\n parsed_file = self._parse_file_name(os.path.basename(file_path))\r\n if parsed_file is not None:\r\n already_downloaded.append(parsed_file[\"id\"])\r\n for index, item in enumerate(self.videos):\r\n # Don't download it if the user has set that option, and the tiktok already exists on the disk\r\n if item[\"id\"] in already_downloaded:\r\n logger.info(\"Already downloaded video with id: \" + item[\"id\"])\r\n continue\r\n file_name = self._format_file_name(item[\"createTime\"], item[\"id\"])\r\n file_path = os.path.join(download_path, file_name)\r\n logger.info(\"Downloading video: \" + file_name + \" (\" + str(index + 1) + \"/\" + str(len(self.videos)) + \")\")\r\n video_url = self._format_video_url(item)\r\n success 
= self.download_video(file_path, video_url, item[\"createTime\"])\r\n if success:\r\n successful_downloads.append(video_url)\r\n else:\r\n failed_downloads.append(video_url)\r\n sleep_time = random.uniform(self.sleep_min, self.sleep_max)\r\n logger.info(\"Sleeping for: \" + str(sleep_time) + \" seconds\")\r\n sleep(sleep_time)\r\n logger.info(\"Processed all {} videos\".format(self.video_count))\r\n logger.debug(\"Fallback counter: \" + str(self.fallback_counter))\r\n logger.debug(\"YouTube-dl DownloadError counter: \" + str(self.fallback_counter))\r\n logger.debug(\"Other error counter: \" + str(self.other_error_counter))\r\n return {\"successful_downloads\": successful_downloads,\r\n \"failed_downloads\": failed_downloads,\r\n \"skipped_downloads\": already_downloaded}", "def download_cdn_videos(filenames,sub_urls,handout_urls,video_urls, target_dir):\n \"\"\" using a simple file downloader \"\"\"\n for i, v in enumerate(video_urls):\n filename_prefix = str(i+1).zfill(2) + '-'\n #original_filename = v.rsplit('/', 1)[1]\n video_filename = filename_prefix + filenames[i] + '.mp4'\n sub_filename = filename_prefix + filenames[i] + '.srt'\n handout_filename = filename_prefix + filenames[i] + '.srt'\n video_path = os.path.join(target_dir, video_filename)\n sub_path = os.path.join(target_dir, sub_filename)\n handout_path = os.path.join(target_dir, handout_filename)\n #print('[debug] GET %s' % v)\n print('[download] Destination: %s' % video_path)\n v = quote(v,safe=\":/\")\n if len(v) != YOUTUBE_VIDEO_ID_LENGTH:\n req = Request(v) \n try:\n video = urlopen(v)\n fileSize = int(video.headers['content-length'])\n finish = False\n existSize = 0\n if os.path.exists(video_path):\n output = open(video_path,\"ab\")\n existSize = os.path.getsize(video_path)\n #If the file exists, then only download the remainder\n if existSize < fileSize:\n #print(\"[debug] bytes range is: %s-%s\" % (existSize,fileSize))\n req.headers[\"Range\"]= \"bytes=%s-%s\" % (existSize,fileSize)\n video = urlopen(req)\n else:\n finish = True\n else:\n output = open(video_path,\"wb\")\n if finish == False:\n file_size_dl = existSize\n block_sz = 262144\n while True:\n buffer = video.read(block_sz)\n if not buffer:\n break\n \n file_size_dl += len(buffer)\n output.write(buffer)\n status = r\"%10d [%3.2f%%]\" % (file_size_dl, file_size_dl * 100. 
/ fileSize)\n status = status + chr(8)*(len(status)+1)\n sys.stdout.write(status)\n sys.stdout.flush()\n \n output.close()\n\n except URLError as e:\n print(\"[warning]error: %r when downloading %s\" % (e.reason,v) )\n\n else:\n download_youtube_video(v,video_path)\n \n if sub_urls[i] != \"\":\n #print('[debug] GET %s' % BASE_URL+sub_urls[i])\n if not os.path.exists(sub_path):\n subs_string = edx_get_subtitle(sub_urls[i], headers)\n if subs_string:\n print('[info] Writing edX subtitles: %s' % sub_path)\n open(os.path.join(os.getcwd(), sub_path),\n 'wb+').write(subs_string.encode('utf-8'))\n\n if handout_urls[i] != \"\":\n #print('[debug] GET %s' % BASE_URL+sub_urls[i])\n if not os.path.exists(handout_path):\n handout_content = urlopen(BASE_URL+handout_urls[i]).read()\n if handout_content:\n print('[info] Writing handout: %s' % handout_path)\n open(os.path.join(os.getcwd(), handout_path),\n 'wb+').write(handout_content)\n #srtfile = urlopen(BASE_URL+sub_urls[i])\n #output = open(srt_path,'wb')\n #output.write(srtfile.read())\n #output.close()", "async def get_youtube_video(self, ctx, *, query):\n\n if not query:\n return await ctx.send(\"Go on, search something.\")\n\n # Executor for sync function\n video_list = await self.bot.loop.run_in_executor(None, YouTube.sync_get_youtube_video, query)\n\n if not video_list:\n return await ctx.say(f\"Sorry, couldn't find anything for `{query}`\")\n\n # Return top hit\n await ctx.send(f'{video_list[0][\"video_url\"]}')", "def youtubefetch(url,outputp=''):\n\toutputpath = os.path.expanduser(outputp)\n\tif (os.path.exists(outputpath) & os.path.isdir(outputpath)) != True:\n\t\toutputpath = '/tmp/'\n\t\n\t(_,_,urlproper) = url.partition(\"?\")\n\t(urlproper,_,_) = urlproper.partition(\"&\")\n\turlproper = \"http://proxy.cs.tcd.ie:8080/www.youtube.com/watch?\" + urlproper\n\tpage = urllib2.urlopen(url).readlines()\n\tfilteredpage = [ elem for elem in page if elem.find(\"fullscreenUrl\") != -1 ]\n\tif (len(filteredpage) == 0):\n\t\treturn 'failed'\n\t\t\n\tfilteredpage = filteredpage[0]\n\t(_, p1, partialurl) = filteredpage.partition(\"video_id=\")\n\t(partialurl , _, name) = partialurl.rpartition(\"&title=\")\n\t(name,_,_) = name.partition(\"'\")\n\tvideourl = \"http://www.youtube.com/get_video.php?\" + p1 + partialurl\n\tvideo = urllib2.urlopen(videourl).read()\n\t#print videourl\n\t#print name\n\toutputfile = open((outputpath+name+\".flv\"),'wb')\n\toutputfile.write(video)\n\toutputfile.flush()\n\toutputfile.close()\n\treturn outputpath+name+\".flv\"", "def yt_url(url, print_title=0):\n try:\n p = pafy.new(url)\n\n except (IOError, ValueError) as e:\n g.message = c.r + str(e) + c.w\n g.content = g.content or generate_songlist_display(zeromsg=g.message)\n return\n\n g.browse_mode = \"normal\"\n v = Video(p.videoid, p.title, p.length)\n g.model.songs = [v]\n\n if not g.command_line:\n g.content = generate_songlist_display()\n\n if print_title:\n xprint(v.title)", "async def youtube(self, ctx, *, query):\n url = f\"https://www.googleapis.com/youtube/v3/search?part=snippet&q={query}&type=video&maxResults=1&key={google_api_key}\"\n response = requests.get(url)\n try:\n await ctx.send(\n f\"https://www.youtube.com/watch?v={response.json()['items'][0]['id']['videoId']}\"\n )\n except IndexError:\n await ctx.send(\"**No results for given query found.**\")", "def download_video_url(\n video_url: str,\n pipeline: PipelineContext,\n destination=\"%(title)s.%(ext)s\",\n progress=ProgressMonitor.NULL,\n):\n\n config = pipeline.config\n logger = 
logging.getLogger(__name__)\n logger.info(\"Starting video download from URL: %s\", video_url)\n\n # Setup progress-tracking\n progress.scale(total_work=1.0)\n progress_tracker = YDLProgressTracker(show_progress_bar=True)\n\n # Resolve destination path template\n output_template = complete_template(config.sources.root, destination)\n logger.info(\"Output template: %s\", output_template)\n\n ydl_opts = {\n \"format\": \"mp4\",\n \"logger\": YDLLogger(logger),\n \"progress_hooks\": [progress_tracker.hook],\n \"outtmpl\": output_template,\n }\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n # Determine destination file name\n video_info = ydl.extract_info(video_url, download=False)\n file_name = ydl.prepare_filename(video_info)\n logger.info(\"Downloading file: %s\", file_name)\n\n # Download file\n with progress_tracker.track(progress):\n ydl.download([video_url])\n\n progress.complete()\n return file_name", "def download_vid(vid_link, quality_num=None):\r\n if quality_num is not None:\r\n # if quality_num provided\r\n try:\r\n os.system(\"youtube-dl -f \"+str(quality_num)+\" \\'\"+str(vid_link)+\"\\'\")\r\n except Exception:\r\n print(Exception)\r\n else:\r\n # by default the best quality is downloaded\r\n try:\r\n os.system(\"youtube-dl \"+str(vid_link))\r\n except Exception:\r\n print(Exception)", "def url(yt_id: str) -> str:\n return \"https://www.youtube.com/watch?v={}\".format(yt_id)", "def fetch_youtube_url(search_term, dev_key=None):\r\n in_cache, video_id = check_if_in_cache(search_term)\r\n if in_cache:\r\n return YOUTUBE_VIDEO_URL + video_id\r\n if not dev_key:\r\n YOUTUBE_SEARCH_BASE = \"https://www.youtube.com/results?search_query=\"\r\n try:\r\n response = requests.get(YOUTUBE_SEARCH_BASE + search_term).content\r\n html_response = html.fromstring(response)\r\n video = html_response.xpath(\"//a[contains(@class, 'yt-uix-tile-link')]/@href\")\r\n video_id = re.search(\"((\\?v=)[a-zA-Z0-9_-]{4,15})\", video[0]).group(0)[3:]\r\n log.debug(f\"Found video id {video_id} for search term {search_term}\")\r\n _ = save_to_cache(search_term=search_term, video_id=video_id)\r\n return YOUTUBE_VIDEO_URL + video_id\r\n except AttributeError as e:\r\n log.warning(f\"Could not find scrape details for {search_term}\")\r\n capture_exception(e)\r\n return None\r\n except IndexError as e:\r\n log.warning(f\"Could not perform scrape search for {search_term}, got a different HTML\")\r\n capture_exception(e)\r\n return None\r\n else:\r\n youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,\r\n developerKey=dev_key,\r\n cache_discovery=False)\r\n try:\r\n in_cache, video_id = check_if_in_cache(search_term)\r\n\r\n if not in_cache:\r\n search_response = youtube.search().list(q=search_term,\r\n part='id, snippet').execute()\r\n for v in search_response['items']:\r\n if v['id']['kind'] == VIDEO:\r\n video_id = v['id']['videoId']\r\n log.debug(f\"Adding Video id {video_id}\")\r\n _ = save_to_cache(search_term=search_term, video_id=video_id) \r\n return YOUTUBE_VIDEO_URL + video_id\r\n except HttpError as err:\r\n err_details = loads(err.content.decode('utf-8')).get('error').get('errors')\r\n secho(\"Couldn't complete search due to following errors: \", fg='red')\r\n for e in err_details:\r\n error_reason = e.get('reason')\r\n error_domain = e.get('domain')\r\n error_message = e.get('message')\r\n\r\n if error_reason == 'quotaExceeded' or error_reason == 'dailyLimitExceeded':\r\n secho(f\"\\tYou're over daily allowed quota. 
Unfortunately, YouTube restricts API keys to a max of 10,000 requests per day which translates to a maximum of 100 searches.\", fg='red')\r\n secho(f\"\\tThe quota will be reset at midnight Pacific Time (PT).\" ,fg='red')\r\n secho(f\"\\tYou can request for Quota increase from https://console.developers.google.com/apis/api/youtube.googleapis.com/quotas.\", fg='red')\r\n else:\r\n secho(f\"\\t Search failed due to {error_domain}:{error_reason}, message: {error_message}\")\r\n return None", "def download_video(video_url, output_path, output_name=\"\", default_type=\"mp4\", verbose=False):\n try:\n if \".\" not in output_name:\n output_name = f\"{output_name}.{default_type}\"\n output_path = os.path.join(output_path, output_name)\n api_response = core.get_request_with_retries(video_url)\n core_utils.print_if_verbose('Processing...', verbose)\n f = open(output_path, 'wb')\n for chunk in api_response.iter_content(chunk_size=255):\n # filter out keep-alive new chunks\n if chunk:\n f.write(chunk)\n core_utils.print_if_verbose(f'The video has been exported here: {output_path}', verbose)\n f.close()\n except Exception as exception_msg:\n print(f\"The video could not be downloaded due to the following error: {exception_msg}\")\n return", "def get_videos_urls(author):\n\tfoundAll = False\n\tind = 1\n\tvideos = []\n\twhile not foundAll:\n\t inp = urllib.urlopen(r'http://gdata.youtube.com/feeds/api/videos?start-index={0}&max-results=50&alt=json&orderby=published&author={1}'.format( ind, author ) )\n\t try:\n\t resp = json.load(inp)\n\t inp.close()\n\t returnedVideos = resp['feed']['entry']\n\t for video in returnedVideos:\n\t videos.append( video['link'][0]['href'] ) \n\n\t ind += 50\n\t if ( len( returnedVideos ) < 50 ):\n\t foundAll = True\n\t except:\n\t #catch the case where the number of videos in the channel is a multiple of 50\n\t print \"error\"\n\t foundAll = True\n\n\treturn videos", "def download_audio_from_youtube(youtube_link: str):\r\n with st.spinner(\"Extracting audio from Youtube...\"):\r\n try:\r\n a = pytube.YouTube(youtube_link).streams.first().download('files/','video_for_audio') # Download video from youtube\r\n b = ffmpg.ffmpeg_extract_audio('files/video_for_audio.mp4','files/audio.mp3') # extract sound and save as mp3\r\n os.remove('files/video_for_audio.mp4') # remove unecessary video\r\n # Release the process from the downloaded files\r\n del a, b\r\n st.success(\"Sound was extracted successfully from the youtube video!\")\r\n except:\r\n st.error(\"Unexpected error has occured, please try again!\")", "def get_youtube_video_url(video_id):\n url = \"https://www.youtube.com/watch?v=\" + video_id\n return url", "def download_songs(playlist_url):\n command_string = 'youtube-dl -x --audio-format wav --postprocessor-args \"-ar 44100 -ac 1\" --output \"Songs/%(' \\\n 'title)s_%(id)s.%(ext)s\" ' + \\\n playlist_url\n args = shlex.split(command_string)\n subprocess.call(args)", "def download(video_identifier,\n output_filename,\n num_attempts=5,\n url_base='https://www.youtube.com/watch?v='):\n # Defensive argument checking.\n assert isinstance(video_identifier, str), 'video_identifier must be string'\n assert isinstance(output_filename, str), 'output_filename must be string'\n assert len(video_identifier) == 11, 'video_identifier must have length 11'\n\n status = False\n\n if not os.path.exists(output_filename):\n command = [\n 'youtube-dl', '--quiet', '--no-warnings', '--no-check-certificate',\n '-f', 'mp4', '-o',\n '\"%s\"' % output_filename,\n '\"%s\"' % (url_base + 
video_identifier)\n ]\n command = ' '.join(command)\n print(command)\n attempts = 0\n while True:\n try:\n subprocess.check_output(\n command, shell=True, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError:\n attempts += 1\n if attempts == num_attempts:\n return status, 'Fail'\n else:\n break\n # Check if the video was successfully saved.\n status = os.path.exists(output_filename)\n return status, 'Downloaded'", "async def youtube(self, ctx, *, query):\r\n\r\n utub = 'https://youtube.com/results?search_query='\r\n url = utub + query.replace(\" \", \"+\")\r\n r = requests.get(url).text\r\n num1 = r.find('{\"videoRenderer')\r\n num2 = r.find('{\"videoRenderer', num1+1)\r\n # print (num1)\r\n # print (num2)\r\n videoRenderer = (json.loads(r[num1:num2-1])[\"videoRenderer\"])\r\n vid = (videoRenderer[\"videoId\"])\r\n page = (\"https://youtube.com/watch?v=\" + vid)\r\n await ctx.send(page)", "def test_task_video_download(url_to_video: str, empty_video_resource: VideoResource):\n download_video(url_to_video, empty_video_resource.id)\n empty_video_resource.refresh_from_db()\n video_instance = empty_video_resource.videos.filter(primary=True).first()\n\n assert empty_video_resource.videos.all()\n assert video_instance.extension == 'mp4'\n assert video_instance.primary\n for item in video_instance.video.open():\n assert item", "def youtube_cmd(ctx, cmd, arg):\n\t\turl = \"http://gdata.youtube.com/feeds/api/videos?q=%s&max-results=3&v=2\" % urllib.parse.quote(arg)\n\t\tr = urllib.request.urlopen(url)\n\t\tr = dom.parse(r)\n\t\t\t\n\t\tresults = int(r.getElementsByTagName(\"openSearch:totalResults\")[0].firstChild.data)\n\n\t\tif results > 0:\n\t\t\tres = min(results, 3)\n\t\t\tctx.reply(\"Results 1-%d out of %s\" % (res, prettyNumber(results)), \"YouTube\")\n\t\telse:\n\t\t\tctx.reply(\"No results found for %s\" % arg, \"YouTube\")\n\n\t\tfor i in r.getElementsByTagName(\"entry\"):\n\t\t\tvid = i.getElementsByTagName(\"id\")[0].firstChild.data\n\t\t\tvid = vid.split(\":\")[-1]\n\n\t\t\tdisplayMeta(ctx, i, vid)", "def do_downloads(filename1=\"og\", filename2=\"lyrical\", video_id=DEFALT_VIDEO_ID):\n original_video_url = youtube_id_to_url(video_id)\n download_from_url(original_video_url, filename1)\n lyrics_video_url = get_lyrics_url(original_video_url)\n download_from_url(lyrics_video_url, filename2)\n\n return filename1, filename2", "def download_videos(blink, save_dir=\"/media\"):\n blink.download_videos(save_dir, since=get_date())", "def get_videos(url):\n videos = []\n if 'cinebix.com' in url:\n resolve_media(url,videos)\n return videos\n \n html = requests.get(url, headers=mozhdr).text\n mlink = SoupStrainer('div', {'class':re.compile('^singcont')})\n videoclass = BeautifulSoup(html, parseOnlyThese=mlink)\n try:\n links = videoclass.findAll('iframe')\n for link in links:\n url = link.get('src')\n resolve_media(url,videos)\n except:\n pass\n\n mlink = SoupStrainer('div', {'class':'entry-excerpt'})\n videoclass = BeautifulSoup(html, parseOnlyThese=mlink)\n try:\n links = videoclass.findAll('iframe')\n for link in links:\n if 'http' in str(link):\n url = link.get('src')\n resolve_media(url,videos)\n except:\n pass\n\n try:\n url = videoclass.p.a.get('href')\n resolve_media(url,videos)\n except:\n pass \n \n return videos", "def download_course_given(self, course_url: str):\n self.course_url = course_url\n self.get_course_page()\n self.get_course_title()\n self.get_course_unit_titles()\n self.get_course_unit_slugs()\n self.get_course_unit_urls()\n\n print(\"\\nGenerating Path 
Slugs...\\n\")\n self.get_course_all_slugs()\n self.get_course_youtube_ids()\n self.download_course_videos()", "def query_youtube(movie_title):\n #convert movie_title to “percent-encoded” string, then open search\n query_string = urllib.urlencode({\"search_query\" : movie_title + \" trailer\"})\n html_content = urllib.urlopen(\"http://www.youtube.com/results?\" +\n query_string)\n #use regular expressions to find all 11 character videos IDs\n query_results = re.findall(r'href=\\\"\\/watch\\?v=(.{11})',\n html_content.read())\n return \"http://www.youtube.com/watch?v=\" + query_results[0]", "def get_yt_video_id(url):\n\n from urlparse import urlparse, parse_qs\n\n if url.startswith(('youtu', 'www')):\n url = 'http://' + url\n\n query = urlparse(url)\n\n if 'youtube' in query.hostname:\n if query.path == '/watch':\n return parse_qs(query.query)['v'][0]\n elif query.path.startswith(('/embed/', '/v/')):\n return query.path.split('/')[2]\n elif 'youtu.be' in query.hostname:\n return query.path[1:]\n else:\n raise ValueError", "def extract_url_download(update: Update, context: CallbackContext) -> None:\r\n received_text = update.message.text\r\n yt_urls = get_link_text(received_text)\r\n yt_urls_msg = update.message.reply_text(pretty_url_string(yt_urls), disable_web_page_preview=True)\r\n if len(yt_urls) > 0:\r\n for url in yt_urls:\r\n if 'list=' in url:\r\n print(\"dshgshj\")\r\n\t\t\t\t# download_playlist_url(update, context, url)\r\n else:\r\n download_url(update, context, url)\r\n context.bot.delete_message(message_id=yt_urls_msg.message_id, chat_id=yt_urls_msg.chat_id)", "def dl_url(url):\n g.browse_mode = \"normal\"\n yt_url(url)\n\n if len(g.model.songs) == 1:\n download(\"download\", \"1\")\n\n if g.command_line:\n sys.exit()", "def download(pattern):\n query = pattern.lower()\n videolist = getvideolist()\n filename = []\n for video in videolist:\n for value in video.values():\n if query in str(value).lower():\n filename.append(video[\"file\"])\n if filename:\n for name in filename:\n downloadvideo(name)\n else:\n safeprint(\"No video matching the given query was found.\")", "def start(self):\n if not path.exists(self.dir):\n mkdir(self.dir)\n\n try:\n yt = YouTube(self.link)\n ys = yt.streams\n if self.aud == True:\n ysf = ys.get_audio_only()\n elif self.aud == False:\n ysf = ys.get_by_resolution(resolution=self.res)\n ysf.download(self.dir)\n except AttributeError:\n raise Exception(f\"Resolution [{self.res}] is not available for this video\")\n except excep.RegexMatchError:\n raise Exception(\"Invalid link!\")\n except excep.VideoPrivate:\n raise Exception(\"This video is private! Try with a public one\")\n except excep.PytubeError:\n raise Exception(\"Error occured!\")\n except excep.VideoPrivate:\n raise Exception(\"This video is private! Try with a public one\")\n except excep.VideoUnavailable:\n raise Exception(\"This video is unvailable!\")\n except excep.ExtractError:\n raise Exception(\"Error in extractiong the video!\")\n except Exception:\n raise Exception(\"Error occured. 
Make sure of your connection ;)\")", "def _download_from_url(self, url):\n target_file_name = self.dir + \"/\" + url.split('/')[-1].split('?')[0]\n urllib.urlretrieve (url, target_file_name)", "def get_course_youtube_ids(self):\n\n with ProgressBar() as pb:\n for i, unit_url in zip(\n pb(range(len(self.course_unit_urls)), label=\"Collecting Youtube IDs:\"),\n self.course_unit_urls,\n ):\n unit_url = ROOT_URL + unit_url\n yt_dlp_opts = {\n \"logger\": MyLogger(),\n \"retries\": 20,\n \"ignoreerrors:\": True,\n \"skip_download\": True,\n }\n with yt_dlp.YoutubeDL(yt_dlp_opts) as ydl:\n lessons_counter = 0\n try:\n logging.debug(\n \"Collecting youtube ids for unit:{}\".format(unit_url)\n )\n info_dict = ydl.extract_info(unit_url, download=False)\n for video in info_dict[\"entries\"]:\n video_id = video.get(\"id\", None)\n self.lesson_youtube_ids.append(video_id)\n lessons_counter += 1\n except DownloadError as e:\n logging.debug(\n \"Collecting youtube ids for unit:{}\".format(unit_url)\n )\n info_dict = ydl.extract_info(\n unit_url, download=False, process=False\n )\n for video in info_dict[\"entries\"]:\n video_id = video.get(\"url\", None)\n self.lesson_youtube_ids.append(video_id)\n lessons_counter += 1\n except Exception as e:\n print(\"Youtube-dl: An error occured!\", e)\n sys.exit(1)\n\n self.unit_ids_counter[unit_url] = lessons_counter\n\n logging.info(\"Course - Collected Youtube IDs\")", "def get_videos(channel_name, CLIENT_SECRETS_FILE):\r\n\r\n video_list = []\r\n\r\n MISSING_CLIENT_SECRETS_MESSAGE = \"WARNING: Please configure OAuth 2.0\"\r\n\r\n YOUTUBE_READONLY_SCOPE = \"https://www.googleapis.com/auth/youtube.readonly\"\r\n YOUTUBE_API_SERVICE_NAME = \"youtube\"\r\n YOUTUBE_API_VERSION = \"v3\"\r\n\r\n flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE,\r\n message=MISSING_CLIENT_SECRETS_MESSAGE,\r\n scope=YOUTUBE_READONLY_SCOPE)\r\n\r\n storage = Storage(\"%s-oauth2.json\" % sys.argv[0])\r\n credentials = storage.get()\r\n\r\n if credentials is None or credentials.invalid:\r\n flags = argparser.parse_args()\r\n credentials = run_flow(flow, storage, flags)\r\n\r\n youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,\r\n http=credentials.authorize(httplib2.Http()))\r\n\r\n # Retrieve the contentDetails part of the channel resource for the\r\n # authenticated user's channel.\r\n channels_response = youtube.channels().list(\r\n forUsername=channel_name,\r\n part=\"contentDetails\"\r\n ).execute()\r\n\r\n for channel in channels_response[\"items\"]:\r\n # From the API response, extract the playlist ID that identifies the list\r\n # of videos uploaded to the authenticated user's channel.\r\n uploads_list_id = channel[\"contentDetails\"][\"relatedPlaylists\"][\"uploads\"]\r\n\r\n # Retrieve the list of videos uploaded to the authenticated user's channel.\r\n playlistitems_list_request = youtube.playlistItems().list(\r\n playlistId=uploads_list_id,\r\n part=\"snippet\",\r\n maxResults=50\r\n )\r\n\r\n while playlistitems_list_request:\r\n playlistitems_list_response = playlistitems_list_request.execute()\r\n\r\n # Print information about each video.\r\n for playlist_item in playlistitems_list_response[\"items\"]:\r\n title = playlist_item[\"snippet\"][\"title\"]\r\n video_id = playlist_item[\"snippet\"][\"resourceId\"][\"videoId\"]\r\n video_list.append((title, video_id, 'https://img.youtube.com/vi/' + video_id + '/0.jpg'))\r\n\r\n playlistitems_list_request = youtube.playlistItems().list_next(\r\n playlistitems_list_request, playlistitems_list_response)\r\n\r\n 
return(video_list)", "def import_youtube_data(_youtube_url):\n result = list()\n\n for i in range(3):\n response = get_verified_response(_youtube_url + '&page={}'.format(i)) # Get server response from url request\n if response is None:\n continue\n # print(response.data)\n soup = BeautifulSoup(response.data, 'html.parser')\n for vid in soup.findAll('a', attrs={'class': 'yt-uix-tile-link'}): # Find all <a> tags on page\n result.append('https://www.youtube.com' + vid['href']) # Extracting web links using 'href' property\n\n print('Length before filter: {}'.format(len(result)))\n result = filter_watch_only(result)\n result = filter_thumbnail_only(result)\n print('Length after filter: {}'.format(len(result)))\n return result", "def get_all_playlist_videos( playlistURL ):\r\n \r\n request = youtube.playlistItems().list(\r\n part=\"contentDetails,id,snippet\",\r\n maxResults=50,\r\n playlistId=\"PLxgoClQQBFjgTMrhvedWk8Q_CVLWwy3ak\"\r\n )\r\n response = request.execute()", "def download_urls(urls_filename, reverse=True, log_filename='youtube-playlist-download.log'):\n urls_file = open(urls_filename)\n url_lines = urls_file.read().splitlines();\n urls_file.close()\n if reverse:\n url_lines = reversed(url_lines)\n\n logfile = open(log_filename, 'w')\n logfile.write('\\n' + str(datetime.now()) + '\\n')\n logfile.flush()\n\n # use -f best to avoid merging and just get the best overall format (saves time)\n youtube_cmd_with_args = ['youtube-dl', '--ignore-errors', '--ignore-config', '--write-info-json', '--no-mtime', '-f best', '-o ' + get_full_filename()]\n\n try:\n for line in url_lines:\n url_id, title = line.split('\\t')[:2]\n print('Downloading video: \"' + title + '\" with id \"' + url_id + '\"')\n run(youtube_cmd_with_args + [YT_PREFIX + url_id])\n print('Done downloading url:', url_id)\n notify('Done downloading url:' + url_id)\n logfile.write('Downloaded\\t' + url_id + '\\t' + title + '\\n')\n logfile.flush()\n except KeyboardInterrupt as e:\n print(\"Exiting\")\n logfile.close()\n\n logfile.close()", "def _get_video_from_html(self, results_page, verbose=False):\n d = json.loads(results_page.text)\n for record in d['data']['records']:\n video_url = record['videoUrl']\n if verbose:\n print \"Video url: \" + video_url\n self._download_from_url(video_url)", "def download_whole(no_interval):\n print(os.getcwd())\n SAVE_PATH = 'tmp'\n ydl_opts = {\"nocheckcertificate\": True, \"noplaylist\": True,\n 'outtmpl': f'{SAVE_PATH}/%(title)s.%(ext)s'}\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n for video in range(len(no_interval)):\n try:\n ydl.download([no_interval[video]])\n except youtube_dl.utils.ExtractorError or youtube_dl.utils.DownloadError:\n print(f\"Couldn't download {no_interval[video]}\")\n continue", "def extract_units(url, headers):\n _print(\"Processing '%s'...\" % url)\n page = get_page_contents(url, headers)\n\n re_splitter = re.compile(r'data-streams=(?:&#34;|\").*1.0[0]*:')\n re_subs = re.compile(r'data-transcript-translation-url=(?:&#34;|\")([^\"&]*)(?:&#34;|\")')\n re_available_subs = re.compile(r'data-transcript-available-translations-url=(?:&#34;|\")([^\"&]*)(?:&#34;|\")')\n re_units = re_splitter.split(page)[1:]\n units = []\n for unit_html in re_units:\n video_id = unit_html[:YOUTUBE_VIDEO_ID_LENGTH]\n sub_urls = {}\n match_subs = re_subs.search(unit_html)\n if match_subs:\n match_available_subs = re_available_subs.search(unit_html)\n if match_available_subs:\n available_subs_url = BASE_URL + match_available_subs.group(1)\n try:\n available_subs = 
get_page_contents_as_json(available_subs_url, headers)\n except HTTPError:\n available_subs = ['en']\n\n for sub_prefix in available_subs:\n sub_urls[sub_prefix] = BASE_URL + match_subs.group(1) + \"/\" + sub_prefix + \"?videoId=\" + video_id\n\n video_youtube_url = 'https://youtube.com/watch?v=' + video_id\n units.append(Unit(video_youtube_url=video_youtube_url,\n sub_urls=sub_urls))\n\n # Try to download some extra videos which is referred by iframe\n re_extra_youtube = re.compile(r'//w{0,3}\\.youtube.com/embed/([^ \\?&]*)[\\?& ]')\n extra_ids = re_extra_youtube.findall(page)\n for extra_id in extra_ids:\n video_youtube_url = 'https://youtube.com/watch?v=' + extra_id[:YOUTUBE_VIDEO_ID_LENGTH]\n units.append(Unit(video_youtube_url=video_youtube_url))\n\n return units", "async def download_video(event):\n url = event.pattern_match.group(1)\n rmsg = await event.get_reply_message()\n if not url and rmsg:\n myString = rmsg.text\n url = re.search(\"(?P<url>https?://[^\\s]+)\", myString).group(\"url\")\n if not url:\n return await edit_or_reply(event, \"What I am Supposed to find? Give link\")\n codevent = await edit_or_reply(event, \"`Preparing to download...`\")\n reply_to_id = await reply_id(event)\n ytdl_data = await ytdl_down(codevent, video_opts, url)\n if ytdl_down is None:\n return\n f = pathlib.Path(f\"{ytdl_data['title']}.mp4\".replace(\"|\", \"_\"))\n codthumb = pathlib.Path(f\"{ytdl_data['title']}.jpg\".replace(\"|\", \"_\"))\n if not os.path.exists(codthumb):\n codthumb = pathlib.Path(f\"{ytdl_data['title']}.webp\".replace(\"|\", \"_\"))\n if not os.path.exists(codthumb):\n codthumb = None\n await codevent.edit(\n f\"`Preparing to upload video:`\\\n \\n**{ytdl_data['title']}**\\\n \\nby *{ytdl_data['uploader']}*\"\n )\n ul = io.open(f, \"rb\")\n c_time = time.time()\n attributes, mime_type = await fix_attributes(f, ytdl_data, supports_streaming=True)\n uploaded = await event.client.fast_upload_file(\n file=ul,\n progress_callback=lambda d, t: asyncio.get_event_loop().create_task(\n progress(d, t, codevent, c_time, \"upload\", file_name=f)\n ),\n )\n ul.close()\n media = types.InputMediaUploadedDocument(\n file=uploaded,\n mime_type=mime_type,\n attributes=attributes,\n thumb=await event.client.upload_file(codthumb) if codthumb else None,\n )\n await event.client.send_file(\n event.chat_id,\n file=media,\n reply_to=reply_to_id,\n caption=ytdl_data[\"title\"],\n )\n os.remove(f)\n if codthumb:\n os.remove(codthumb)\n await event.delete()", "async def download(self, ctx, *, song):\n try:\n with youtube_dl.YoutubeDL(ytdl_download_format_options) as ydl:\n if \"https://www.youtube.com/\" in song:\n download = ydl.extract_info(song, True)\n else:\n infosearched = ydl.extract_info(\n \"ytsearch:\"+song, False)\n download = ydl.extract_info(\n infosearched['entries'][0]['webpage_url'], True)\n filename = ydl.prepare_filename(download)\n embed = discord.Embed(\n title=\"Your download is ready\", description=\"Please wait a moment while the file is beeing uploaded\")\n await ctx.send(embed=embed, delete_after=30)\n await ctx.send(file=discord.File(filename))\n os.remove(filename)\n except (youtube_dl.utils.ExtractorError, youtube_dl.utils.DownloadError):\n embed = discord.Embed(title=\"Song couldn't be downloaded\", description=(\"Song:\"+song))\n await ctx.send(embed=embed)", "def download_all_videos(self, dl_limit=10):\r\n counter = dl_limit\r\n self.video_link_title_keylist = self.video_link_title_dict.keys()\r\n music = []\r\n for title in self.video_link_title_keylist:\r\n try:\r\n 
title = title.encode('ascii')\r\n # print 'downloading title with counter: ', counter\r\n if not counter:\r\n return random.choice(music) #some margin for randomness, first result isnt always accurate, (gets slower...)\r\n print 'downloading title: ', title\r\n\r\n self.add_result(\"Dowloaded_Song\", title)\r\n\r\n path = self.download_video(self.video_link_title_dict[title], title)\r\n music.append(path)\r\n counter = counter - 1\r\n except:\r\n print \"illegal characters in youtube name\" + title + \"\\n trying next result\"", "async def video(ctx, message):\n \"\"\":param: ctx\"\"\"\n \"\"\":param: message\"\"\"\n \"\"\"return video url\"\"\"\n link_list = []\n print ('Searching YouTube for: %s' % message)\n url = \"https://www.youtube.com/results?search_query=\" + message\n response = urlopen(url)\n html = response.read()\n soup = BeautifulSoup(html, \"lxml\")\n for vid in soup.findAll(attrs={'class': 'yt-uix-tile-link'}):\n link_list.append('https://www.youtube.com' + vid['href'])\n if(len(link_list) >=1):\n random_num = random.randint(0, len(link_list) - 1)\n await bot.say(link_list[random_num])\n else:\n await bot.say(\"there is no contente for \"+message)", "def download_mp3(url, destination=\"./\"):\n options = {}\n task_status = {}\n\n def progress_hook(status):\n task_status.update(status)\n\n options.update(_YOUTUBEDL_OPTS_)\n options[\"progress_hooks\"] = [progress_hook]\n options[\"outtmpl\"] = os.path.join(destination, \"%(title)s.%(ext)s\")\n with youtube_dl.YoutubeDL(options) as youtube:\n youtube.download([url])\n filename = task_status[\"filename\"]\n # BUG: filename extension is wrong, it must be mp3\n filename = filename[: filename.rindex(\".\") + 1]\n return filename + options[\"postprocessors\"][0][\"preferredcodec\"]", "def getDownload(self, html, episode_number):\n soup = BeautifulSoup(html, \"html.parser\")\n download = soup.find_all('source')\n if download:\n self.downloads[\"Episode %s.mp4\" % str(episode_number)] = download[0]['src']\n return\n\n print(\"[!] 
Download link not found for episode %s\" % str(episode_number))", "def __init__(self, url, params=None):\n super(YoutubeVideo, self).__init__(url, params)\n self.video_id = self.get_video_id()", "def _get_youtube_data_url(self):\n # Need to put a check because in some cases the URL is already passed\n # by the playlist extractor.\n if self.title == \"\":\n self.title = get_youtube_title(self.URL)\n self.stream_url = grab_link(self.URL)", "def download_by_link(link: str, videoid: str) -> [str, str]:\n\t# set youtube_dl arguments \n\tydl_opts = {\n\t\t'quiet': False, # don't write in output\n\t\t'no_warnings': True, # write warnings in output\n\t\t'format': \"bestaudio/best\", # download best audio quality\n\t\t'format': 'mp4', # setup format webm\n\t\t'outtmpl': '%(name)s' + str(videoid) + '.%(ext)s', # setup output name \n\t\t'postprocessor': [{ # dk how this need work, but if this not setup audio didn't download\n\t\t\t'key': \"FFmpegExtractAudioPP\",\n\t\t\t'preferredquality': \"512\",\n\t\t }],\n\t}\n\t# start download audio\n\twith youtube_dl.YoutubeDL(ydl_opts) as ydl:\n\t\tdata = ydl.extract_info(link) # exctrat info about audio\n\tfake_name = \"NA\" + str(videoid)\n\t# TODO: think about this query \n\t# refactoring title \n\ttitle = data.pop('title')\n\ttitle = re.sub(r'[^\\w]', ' ', title)\n\ttitle = translate(title)\n\ttitle = title.replace(' ', '_')\n\treturn fake_name, title", "def add_videos(playlist):\n surl = playlist['link']\n # 작은 playlist의 url을 surl에 저장\n soup = get_soup(surl)\n # 작은 플레이리스트의 html 파싱하여 soup에 저장\n print(f\" getting videos for playlist: {playlist['title']}\")\n\n videos = []\n\n # items are list of video a links from list\n items = soup('a', class_='yt-uix-tile-link')\n # a 태그의 class가 'yt-uix-tile-link'인 태그 items에 저장\n # items는 작은 플레이리스트의 동영상 목록들임\n\n # note first part of look get info from playlist page item,\n # and the the last part opens the video and gets more details\n if len(items) > 0:\n for i in items:\n # 각각의 items i에 하나씩 저장\n d = dict()\n vurl = fix_url(i['href'])\n # 동영상 url을 vurl에 저장\n t = i.find_next('span', {'aria-label': True})\n # 동영상의 span 태그 중 aria=label값이 존재하는 것 t에 저장\n # t는 동영상의 재생 시간임\n d['time'] = t.text if t else 'NA'\n # d 딕셔너리에 t저장\n\n d.update(parse_video(vurl))\n videos.append(d)\n # videos에 d를 append\n\n else: # must be only one video\n d = {'time': 'NA'}\n d.update(parse_video(surl))\n videos.append(d)\n\n # add new key to this playlist of list of video infos\n playlist['videos'] = videos\n print()", "def sync_get_youtube_video(query):\n return yt.get_video_info(query, num_results=1)", "def get_video_info(url):\n ydl = youtube_dl.YoutubeDL()\n ydl.add_default_info_extractors()\n\n try:\n return ydl.extract_info(url, download=False)\n except youtube_dl.DownloadError:\n return None", "def get_yt_link(self, video_id, url=None, skip_errors=False):\n if self._settings.enable_yt_dl and url:\n if not self._yt_dl:\n self._yt_dl = YouTubeDL.get_instance(self._settings, self._callback)\n if not self._yt_dl:\n raise YouTubeException(\"yt-dlp initialization error.\")\n return self._yt_dl.get_yt_link(url, skip_errors)\n\n return self.get_yt_link_by_id(video_id)", "def downloadvideo(filename):\n url = \"http://openings.moe/video/\" + filename\n f = getfile(url)\n safeprint(Colors.PURPLE + url + Colors.END + \":\\nSaving to --> \" + Colors.YELLOW + filename + Colors.END)\n with open(os.path.basename(url), \"wb\") as local_file:\n try:\n local_file.write(f.read())\n except IOError as e:\n safeprint(\"An error occurred while saving the file, try 
again. \" + str(e))", "def movieid_first_video_url(self, movie_id):\n YOUTUBE_URL = \"https://www.youtube.com/watch?v=\"\n VIDEOS_URL = \"https://api.themoviedb.org/3/movie/%s/videos\"\n url_with_movieid = VIDEOS_URL % (movie_id)\n parm_dict = {\"api_key\": self.api_key, \"language\": self.language}\n url = url_with_movieid + \"?\" + urlencode(parm_dict, doseq=True)\n # print url\n\n response = requests.get(url)\n json_dict = json.loads(response.text)\n response.close()\n\n youtube_video_key = json_dict['results'][0]['key']\n return YOUTUBE_URL + youtube_video_key", "def soup_process_video(input_url):\r\n # scrape the url\r\n fp = urllib.request.urlopen(input_url)\r\n #read bytes\r\n mybytes = fp.read()\r\n mystr = mybytes.decode(\"utf8\")\r\n fp.close()\r\n soup = BeautifulSoup(mystr,'html.parser')\r\n return (soup.find(\"a\", {'class': \"download-btn\"}).get('href'))", "def get_youtube_url(search_term):\n position = 1\n while True:\n try:\n info_dict = get_ydl_dict(search_term, position)\n if dict_is_song(info_dict):\n break\n except Exception as e:\n logger.error(\n \"Error extracting youtube search %s, position %s : %s.\",\n search_term,\n position,\n e,\n )\n if position > 4:\n # Too many wrong results\n return None\n position += 1\n return info_dict[\"webpage_url\"]", "def play_trailer(self):\n webbrowser.open(self.youtube_trailer_url)", "def external_download(song, filename, url):\n cmd = Config.DOWNLOAD_COMMAND.get\n ddir, basename = Config.DDIR.get, os.path.basename(filename)\n cmd_list = shlex.split(cmd)\n\n def list_string_sub(orig, repl, lst):\n \"\"\" Replace substrings for items in a list. \"\"\"\n return [x if orig not in x else x.replace(orig, repl) for x in lst]\n\n cmd_list = list_string_sub(\"%F\", filename, cmd_list)\n cmd_list = list_string_sub(\"%d\", ddir, cmd_list)\n cmd_list = list_string_sub(\"%f\", basename, cmd_list)\n cmd_list = list_string_sub(\"%u\", url, cmd_list)\n cmd_list = list_string_sub(\"%i\", song.ytid, cmd_list)\n dbg(\"Downloading using: %s\", \" \".join(cmd_list))\n subprocess.call(cmd_list)", "def main(csv_path):\n with open(csv_path, newline='') as csvFile:\n csv_object = csv.reader(csvFile, delimiter=',')\n\n for row in csv_object:\n # Get URL\n # Check that the URL contains correct domain, otherwise add it\n url_regex = r\"v=([^&]+)\"\n youtube_url = \"https://youtu.be/\"\n match = re.search(url_regex, row[0])\n\n if match:\n video_ref = match.group(1)\n else:\n video_ref = row[0]\n\n video_url = youtube_url + video_ref\n\n try:\n download_video(video_url, video_ref, row)\n except Exception as e:\n click.secho('Error: '+e.message, fg=\"red\")\n\n return None", "def download(self, language, filename, filetype):\n if language not in self.languages.keys():\n print \"Theres's no subtitle in this language\"\n sys.exit()\n url = \"http://www.youtube.com/api/timedtext?v={0}&lang={1}\".format(self.video_id, language)\n self.subtitle = urllib2.urlopen(url)\n if filetype == \"srt\":\n self.writeSRTFile(filename)\n else:\n self.writeXMLFile(filename)", "def download(df_shorter,folderName):\n os.mkdir(str(folderName))\n path = os.getcwd()+'\\\\'+str(folderName)+'\\\\'\n #add column with video link generated from IDs\n df_shorter['urls'] = df_shorter['id'].apply(lambda x: generateLinkFromId(x))\n vid_dl = []\n i = 1\n for url in df_shorter['urls']:\n if url != False:\n name = str(i)+'.mp4'\n vid_dl.append(wget.download(url,path+name))#retrun the path of the saved video\n i = i+1\n return vid_dl", "def downloader(thread_num):\n tid = 'Thread ' + 
numprefix.format(thread_num) + ': '\n for i in range(thread_num, len(self.titles), thread_count):\n title, link = self.titles[i], self.download_urls[i]\n name = vidprefix.format(i) + ' ' + title + '.mp4'\n tries = 0\n while (not os.path.exists(name) or os.path.getsize(name) == 0) \\\n and tries <= trycount:\n if os.path.exists(name): os.remove(name)\n self.log(tid + 'Calling wget for ' + name)\n subprocess.call(['wget', '--output-document=' + name, link])\n tries += 1\n if (not os.path.exists(name) or os.path.getsize(name) == 0):\n self.log(tid + 'wget failed for ' + name)\n else:\n self.log(tid + 'wget successfully downloaded ' + name)", "def dirty_yt_search(keyword):\n yt_url = 'https://www.youtube.com/results'\n search_args = {'search_query': keyword}\n\n resp = requests.get(yt_url, search_args)\n print(resp.text)\n search_results = re.findall(r'href=\\\"\\/watch\\?v=(.{11})', resp.text)\n return 'http://www.youtube.com/watch?v=' + search_results[0]", "def download_video(video_stream):\n global file_size\n file_size = size_in_mb(video_stream.filesize)\n home_dir = os.environ['HOME']\n path = f'{home_dir}/Downloads/Video'\n print('-'*60)\n print(f'Filename:\\t{video_stream.title}')\n print(f'Location:\\t{path}')\n print(f'Size:\\t\\t{file_size} MB\\n')\n\n filename = video_stream.title + '_video.mp4'\n filename = filename.replace('/', ' ')\n filename = filename.replace('\\\\', ' ')\n\n if os.path.exists(os.path.join(path, filename)):\n print(\"The file has been already downloaded.\")\n sys.exit()\n \n video_stream.download(path, filename)", "def test_get_video_id_from_url(self):\n self.assertEqual(get_video_id_from_url('https://www.youtube.com/embed/DqGwxR_0d1M'),\n 'DqGwxR_0d1M')\n self.assertEqual(get_video_id_from_url('https://youtu.be/DqGwxR_0d1M'), 'DqGwxR_0d1M')\n self.assertEqual(\n get_video_id_from_url('https://www.youtube.com/watch?v=DqGwxR_0d1M&feature=youtu.be'),\n 'DqGwxR_0d1M')\n self.assertEqual(get_video_id_from_url('https://www.youtube.com/watch?v=DqGwxR_0d1M'),\n 'DqGwxR_0d1M')", "def start(self):\n\n ydl_opts = {}\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n while True:\n videos = self.get_videos() # getting list of all videos from file\n print('{} videos to go'.format(len(videos))) # print no. 
of video remaining\n video = get_first_item(videos) # get next video for downloading\n if video is None: # check if video is there or not\n break\n\n ydl.download([video]) # downloading video\n videos.remove(video) # remove video from list\n self.save_file(videos) # save updated list to file\n\n print('All downloaded')", "def spider(given_url):\n \n url_to_crawl = given_url\n\n source_code = requests.get(url_to_crawl)\n plain_text = source_code.text\n soup = BeautifulSoup(plain_text)\n # name = soup.find('h1', {'class': 'pl-header-title'})\n # name = name.string\n # name = str(name)\n # name = name.strip('')\n # fw = open('links of' + name + '.txt', 'w')\n # fw2 = open('names of' + name + '.txt', 'w')\n fw = open('links.txt', 'w')\n fw2 = open('names.txt', 'w')\n for link in soup.findAll('a', {'class': 'pl-video-title-link yt-uix-tile-link yt-uix-sessionlink spf-link '}):\n my_href = 'https://www.youtube.com' + link.get('href')\n title = link.string\n #print(my_href, title)\n fw.write(my_href + '\\n')\n fw2.write(title)\n\n fw.close()\n fw2.close()\n\n \"\"\"Downloading Part\"\"\"\n try:\n os.system(\"youtube-dl --max-quality FORMAT -a links.txt\")\n return\n except:\n print(\"Something went wrong related to downloading\")\n exit(2)", "def get_url():\r\n songs = []\r\n with open(FILE_CONTAINING_URLS) as f:\r\n for line in f:\r\n if not line.startswith(\"#\") and is_web_url(line):\r\n songs.append(line)\r\n\r\n # pick a random song and store it in song variable\r\n song = random.choice(songs)\r\n\r\n url_attempts = []\r\n\r\n for x in range(RETRY_COUNT):\r\n response = requests.get(song)\r\n # check if URL is valid and also make sure video is available\r\n if response.ok and video_is_available(song):\r\n return song\r\n # store failed URL\r\n url_attempts.append(song)\r\n # choose new random song\r\n song = random.choice(songs)\r\n\r\n print(\"Could not access video URLs. 
Please check network connection\")\r\n print(\"Tried the following URLs before failing:\")\r\n print(\"\\n\".join(url_attempts))\r\n exit(1)", "def tekstowo_youtube_url(source):\n reg = re.compile(r\"var videoID = \\\"(.*)\\\";\")\n try:\n video_id = reg.search(source).group(1)\n except Exception:\n raise Exception(ERROR_STR + '[crawler] cannot find videoID')\n if not video_id:\n raise Exception(ERROR_STR + '[crawler] empty videoID')\n\n return \"https://www.youtube.com/watch?v=\" + video_id", "def download_flickr_video(url, save_root, force_overwrite):\n\n # (Try to) open the URL\n response = urlopen(url)\n # Extract the file extension from the resolved URL\n m = re.match(r'(.*)\\?s=.*', response.url)\n _, ext = os.path.splitext(m.group(1))\n # Build the path to save the video to\n video_meta = parse.parse(VIDEO_URL_FORMAT, url)\n user_id = video_meta['user_id']\n video_id = video_meta['video_id']\n save_path = os.path.join(save_root, f'{user_id}-{video_id}{ext}')\n # Save the video\n if os.path.isfile(save_path) and not force_overwrite:\n raise FileExistsError(f'File already exists at {save_path}')\n else:\n with open(save_path, 'wb') as f:\n shutil.copyfileobj(response, f)\n\n return save_path", "async def play(self, ctx, url):\n server_id = ctx.message.server.id\n requester = ctx.message.author\n #refuse command if we don't know which voice channel to join\n if not self.in_voice(server_id) and not requester.voice.voice_channel:\n await ctx.bot.send_message(ctx.message.channel, \"Dude, get in voice first.\")\n return\n #warn user that the bot won't jump channels while playing\n if self.in_voice(server_id) and not self.user_in_channel(server_id, requester):\n vcname = self.get_server_dict(server_id)['voice'].channel.name\n await ctx.bot.send_message(ctx.message.channel, \"I'm already playing in {}. 
Get in.\".format(vcname))\n return\n #create ytdl instance\n #set quiet: True if needed\n await ctx.bot.send_typing(ctx.message.channel)\n ytdl = YoutubeDL(self._default_options)\n try:\n info = ytdl.extract_info(url, download=False)\n except DownloadError:\n #url was bullshit\n search_kw = ctx.message.content[5:]\n info = await self._find(ctx.bot, search_kw)\n if not info:\n #no hits\n await ctx.bot.send_message(ctx.message.channel, \"No media found.\")\n if 'entries' in info:\n #it's a playlist\n #just grab the first item\n info = info['entries'][0]\n #at this point info['url'] should point to our preferred format\n download_url = info['url']\n #get media attributes\n title = info.get('title')\n duration = ''\n if info.get('is_live'):\n duration = 'LIVE'\n else:\n seconds = info.get('duration')\n if seconds:\n duration = str(datetime.timedelta(seconds=seconds))\n nick = self.get_nick(requester)\n #add to queue\n self.enqueue(server_id, download_url, title, duration, nick)\n await ctx.bot.send_message(ctx.message.channel, self.format_song_display('+', title, duration, nick))\n #join user's voice channel unless already in voice\n if not self.in_voice(server_id):\n await self._join(ctx.bot, server_id, requester.voice.voice_channel)\n #start playback unless already playing\n if not self.is_playing(server_id):\n await self._play(ctx.bot, server_id)", "def play_youtube(self, media_id):\n pass", "def playlist_videos(playlist_id):\r\n url = PLAYLIST_ITEMS_URL.format(API_KEY, playlist_id)\r\n response = util.web.http_get(url=url, json=True, referer='https://tinychat.com')\r\n\r\n if response['json'] is not None:\r\n video_list = []\r\n # next_page_token = response['json']['nextPageToken']\r\n try:\r\n if 'items' in response['json']:\r\n for item in response['json']['items']:\r\n video_id = item['snippet']['resourceId']['videoId']\r\n details = video_details(video_id)\r\n if details is not None:\r\n info = {\r\n 'type': 'youTube',\r\n 'video_id': video_id,\r\n 'video_title': details['video_title'],\r\n 'video_time': details['video_time']\r\n }\r\n video_list.append(info)\r\n return video_list\r\n except KeyError as ke:\r\n log.error(ke, exc_info=True)\r\n return None", "def download_video_feed(feed_url):\n with urlopen(feed_url) as conn:\n data = json.loads(conn.read().decode())\n\n return tuple(\n FeedItem(\n # The ID is part of the text of the string.\n video_id= entry[\"id\"][\"$t\"].rsplit(\":\", 1)[1],\n # The upload time is an ISO UTC date with milliseconds.\n # The milliseconds are apparently always zero.\n upload_time= datetime.datetime.strptime(\n entry[\"published\"][\"$t\"],\n \"%Y-%m-%dT%H:%M:%S.000Z\"\n ),\n title= entry[\"title\"][\"$t\"],\n description= entry[\"media$group\"][\"media$description\"][\"$t\"],\n )\n for entry in data[\"feed\"].get(\"entry\", [])\n )", "def youtube_id_to_url(yt_video_id):\n return 'https://www.youtube.com/watch?v=' + yt_video_id", "async def igvideo(self, ctx, url):\n response = requests.get(url.replace(\"`\", \"\"), headers={\"Accept-Encoding\": \"utf-8\"})\n tree = html.fromstring(response.content)\n results = tree.xpath('//meta[@content]')\n sources = []\n for result in results:\n try:\n if result.attrib['property'] == \"og:video\":\n sources.append(result.attrib['content'])\n except KeyError:\n pass\n if sources:\n await ctx.send(sources[0])\n self.logger.info(misolog.format_log(ctx, f\"Success\"))\n else:\n await ctx.send(\"Found nothing, sorry!\")\n self.logger.warning(misolog.format_log(ctx, f\"Found nothing\"))", "def play_url(url, name):\n 
video_url = scraper.get_media_url(url)\n if video_url == -1:\n GUI.info_box(u\"Vesen\", u\"Fann ekki upptöku\")\n else:\n player.play(video_url, name)", "def download_vid(item):\n vid_name, vid_id = item\n vid = Video(vid_name, vid_id, resolution='224p')\n vid.download()", "def ytd(title, url ):\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'm4a',\n # 'preferredquality': '192',\n }],\n 'progress_hooks': [download_hook],\n 'outtmpl': title + '.%(ext)s',\n }\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n ydl.download([url])\n return(title + \".m4a\")", "def download(self, url, destination):\n fileDownloader = utils.HttpFileDownloader(url, destination)\n fileDownloader.download()", "def download_video(url, fn):\n start_time = time.time()\n\n # Sorry: This is terrible code, but I'm kind of throwing it\n # together as I discover more about it.\n print ' Downloading {0} to {1}'.format(url, fn)\n\n resp = requests.get(url)\n if resp.status_code != 200:\n print ' GAH! MY EYES! {0} kicked up {1}'.format(url, resp.status_code)\n return\n\n rss_url_m = re.search(r'\"(/rss/flash/\\d+)\"', resp.content)\n rss_url = 'http://blip.tv' + rss_url_m.group(0).strip('\"')\n resp = requests.get(rss_url)\n\n rss_content = resp.content\n\n for ending in POSSIBLE_ENDINGS:\n regex = r'\"http://blip.tv[^\"]+?' + ending + '\"'\n\n download_m = re.search(regex, rss_content)\n if not download_m:\n print ' No {0} url found'.format(ending)\n continue\n\n download_url = download_m.group(0).strip('\"')\n print ' Attempting to download {0}'.format(download_url)\n\n try:\n resp = requests.get(download_url, stream=True)\n print ' Downloading {0}'.format(download_url)\n if resp.status_code == 200:\n total_length = int(resp.headers['content-length'])\n\n if os.path.exists(fn + ending) and file_size(fn + ending) == total_length:\n print ' Already downloaded.'\n return\n\n with open(fn + ending, 'w') as fp:\n total_downloaded = 0\n\n tic_chunk = total_downloaded\n tic = time.time()\n for chunk in resp.iter_content(chunk_size=1024):\n if chunk:\n fp.write(chunk)\n fp.flush()\n tic_chunk += len(chunk)\n total_downloaded += len(chunk)\n\n if time.time() - tic > 1:\n with TERM.location(x=0):\n line = ' {0} {1}kbps'.format(\n format_downloaded(total_downloaded, total_length),\n int(tic_chunk / (time.time() - tic) / 1000))\n sys.stdout.write(line + TERM.clear_eol)\n sys.stdout.flush()\n tic_chunk = 0\n tic = time.time()\n print ''\n\n print ' Done! {0} {1}mb {2}'.format(\n fn + ending,\n int(total_length / 1000000.0),\n format_duration(time.time() - start_time))\n return\n\n else:\n print ' HTTP{0}! GAH! SPUTTER!'.format(resp.status_code)\n\n except requests.exceptions.ConnectionError as exc:\n print ' CONNECTIONERROR! GAH! SPUTTER! 
{0}'.format(exc)\n\n print ' SO MANY FAILURES!'\n raise NoDownloadMeNoLikeyException()", "def make_video_url(movie_id, api_key):\n\n MOVIE_URL = \"https://api.themoviedb.org/3/movie/\"\n LANG = \"&language=en-US\"\n # Find the youtube key for video trailer\n connection = requests.get(MOVIE_URL + str(movie_id) +\n \"/videos?api_key=\" + api_key + LANG)\n videos_json = json.loads(connection.text)\n connection.close()\n\n if connection.status_code != 200:\n # constant in case issue is found with connection....\n return VIDEO_URL + '5PSNL1qE6VY'\n else:\n if len(videos_json['results']) == 0:\n # constant in case no video is found for given movie....\n return VIDEO_URL + '5PSNL1qE6VY'\n else:\n # If all well we get aa video url for all movie\n # based on discovery or discovery by year\n return VIDEO_URL + videos_json['results'][0]['key']", "async def youtube(self, ctx, *search):\n voice = discord.utils.get(self.bot.voice_clients, guild=ctx.guild)\n\n if not (ctx.author.voice or voice):\n await ctx.message.add_reaction('\\U0001F615')\n await ctx.send(\"Not in a voice channel.\")\n return\n \n YDL_OPTS = {'default_search': 'auto', 'format': 'bestaudio',\n 'noplaylist': 'True'}\n FFMPEG_OPTS = {'before_options': '-reconnect 1 -reconnect_streamed 1 '\n '-reconnect_delay_max 5',\n 'options': '-vn'}\n \n with youtube_dl.YoutubeDL(YDL_OPTS) as ydl:\n try:\n info = ydl.extract_info(' '.join(search), download=False)\n except youtube_dl.utils.DownloadError:\n await ctx.message.add_reaction('\\U0001F615');\n await ctx.send(f\"Couldn't stream that sound.\")\n return\n\n if 'entries' in info:\n if info['entries']:\n info = info['entries'][0]\n else:\n await ctx.send(f\"No results found for `{search}`.\")\n return\n \n if not voice:\n await ctx.message.add_reaction('\\U0001F615')\n await self.join(ctx)\n\n voice = discord.utils.get(self.bot.voice_clients, guild=ctx.guild)\n \n if voice:\n if voice.is_playing():\n voice.stop()\n \n URL = info['formats'][0]['url']\n title = info['title']\n voice.play(discord.FFmpegPCMAudio(executable=os.environ['FFMPEG_PATH'],\n source=URL, **FFMPEG_OPTS))\n \n await ctx.send(f\"Playing `{title}`.\")" ]
[ "0.7559332", "0.7511336", "0.7269561", "0.7109881", "0.69188035", "0.6849369", "0.6837979", "0.67307305", "0.67147034", "0.6699649", "0.6661852", "0.6603577", "0.6570227", "0.6545135", "0.65282965", "0.6528034", "0.6490521", "0.64205855", "0.63792336", "0.63700604", "0.63433367", "0.63277906", "0.6298527", "0.6272969", "0.62356573", "0.6211464", "0.6199923", "0.6186832", "0.6168526", "0.616394", "0.6121756", "0.6118238", "0.61055785", "0.61009365", "0.6096731", "0.60924286", "0.60746604", "0.6063348", "0.60186833", "0.6013931", "0.5990241", "0.5941902", "0.592355", "0.59027916", "0.58924145", "0.5880168", "0.5871658", "0.58627516", "0.58581376", "0.58542866", "0.5848118", "0.58469605", "0.58441955", "0.5832069", "0.5830507", "0.58195204", "0.5791336", "0.5778909", "0.5767589", "0.5767135", "0.574161", "0.5732235", "0.5707057", "0.568659", "0.5679438", "0.56790936", "0.5663001", "0.56626743", "0.5659062", "0.565889", "0.5650707", "0.5647895", "0.5630932", "0.560571", "0.56024164", "0.5601405", "0.5600421", "0.5596286", "0.5582787", "0.55795485", "0.55716056", "0.55692255", "0.556901", "0.5545642", "0.5529973", "0.5511336", "0.55071557", "0.54950684", "0.5482425", "0.54798764", "0.5467569", "0.5461113", "0.5460923", "0.54539615", "0.54533607", "0.54530257", "0.5444963", "0.5436253", "0.54340583", "0.5434017" ]
0.7409529
2
Check that user_data is a dict and that key is in there
def has_user_data(self, key): return isinstance(self._user_data, dict) and key in self._user_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_for_dict(check):", "def is_valid(data):\n return isinstance(data, dict) \\\n and \"u_id\" in data \\\n and \"token\" in data \\\n and isinstance(data[\"u_id\"], int) \\\n and isinstance(data[\"token\"], str)", "def can_insert(data):\n return isinstance(data, dict)", "def _is_key_value(data):\n if data is None:\n return False\n return all(x in data for x in ['key', 'value'])", "def isDict(data):\n\ttry:\n\t\tfrom types import DictType\n\t\tif type(data) == DictType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type({}):\n\t\t\treturn True\n\treturn False", "def verify_rpc_value ( user_dict ):\n for key in user_dict:\n if not isinstance ( user_dict[ key ], str ):\n # Error code 422\n raise ValueError ( 'Value of {0} is not a string'.format ( key ) )", "def _is_dict(val):\n\n return isinstance(val, dict)", "def sanity_check(cls, data): # no version with ID, since PUT (update) isn't allowed\n data = _dict_sanity_check(data,\n mandatory_keys = [\n (\"user_id\", User.exists),\n (\"customer_id\", Customer.exists)\n ],\n optional_keys = [])\n return data, None", "def verifyData(self, expectedDict):\n pass", "def _is_dict(item):\n return isinstance(item, dict)", "def validate_user_request_dict(request_dict):\n if 'first_name' not in request_dict:\n return False\n if 'last_name' not in request_dict:\n return False\n if 'id' not in request_dict:\n return False\n if 'email' not in request_dict:\n return False\n return True", "def check_user_data_in_response(response_data):\n assert response_data[\"id\"] > 0\n assert response_data[\"name\"] == pytest.test_user.name\n assert response_data[\"email\"] == pytest.test_user.email\n assert response_data[\"gender\"] == pytest.test_user.gender\n assert response_data[\"status\"] == pytest.test_user.status", "def __is_valid_dict(self, GRFData):\n\n if type(GRFData) is not dict:\n raise ValueError(\"Expected GRFData to be of type '{}', but received type '{}'.\".format(type(dict), type(GRFData)))\n\n for component in self.comp_list:\n if component not in GRFData.keys():\n raise ValueError(\"Component '{}' not found in GRFData.\".format(component))", "def is_data_true(data):\n\n if not data:\n return False\n\n if not isinstance(data, dict):\n if not util.get_value_from_health_internal_tuple(data):\n return False\n return True\n\n for _k in data:\n if is_data_true(data[_k]):\n return True\n\n return False", "def dict_support_required(self):\n\t\treturn self.typemanager.has_dicts", "def test_process_dict_true(self):\n\n self.assertIn('userA@domain', self.temp_set)", "def _validate_input_dict(self, input):\n if isinstance(input, dict):\n required = {\"type\", \"value\"}\n not_found = required - set(input.keys())\n if not_found:\n raise SpecificationError(\n \"Required key(s) not found in input dictionary: {}\".format(\n \", \".join(not_found)\n )\n )\n else:\n raise Exception(\"input element has to be a dictionary\")", "def valid_user_data(user_data):\n return 'account_ids' in user_data and 'monthly_expenses' in user_data", "def is_dict(value):\n return isinstance(value, dict)", "def _validate_dict_data(self, expected, actual):\n for k, v in expected.iteritems():\n if k in actual:\n if (isinstance(v, basestring) or\n isinstance(v, bool) or\n isinstance(v, (int, long))):\n if v != actual[k]:\n return \"{}:{}\".format(k, actual[k])\n elif not v(actual[k]):\n return \"{}:{}\".format(k, actual[k])\n else:\n return \"key '{}' does not exist\".format(k)\n return None", "def test_dictionary(self):\n self.assertIsInstance(self.test1json, dict)", 
"def dict_type(verifield, required):\n if verifield is None: return True\n if not isinstance(verifield, dict): return False\n all_of = [value or True for value in verifield.values() if isinstance(value, required) or value is None]\n return not verifield or (all(all_of or [False]) and len(all_of) == len(verifield))", "def check_data_is_format(data):\n try:\n data_lst = data\n if not isinstance(data, list):\n data_lst = json.loads(data)\n\n for data in data_lst:\n if not isinstance(data, dict):\n raise ValueError(\"data contains not dict\")\n\n for key in data.keys():\n check_type(key)\n except ValueError as e:\n logging.error(\"data format check error %s\" % e)\n return False, None\n except Exception as e:\n logging.error(\"data format check unknown error %s\" % e)\n return False, None\n else:\n return True, data_lst", "def is_dict(self) -> bool:\n return True", "def _verfify_auth_and_profiles_data (self, data):\n if type(data.get('profiles')) == dict:\n if len(str(data.get('authURL', ''))) > 10 and len(str(data.get('authURL', ''))) < 50:\n return True\n return False", "def is_typed_dict(self) -> bool:\n return True", "def params_is_valid(data):\n if isinstance(data['title'], str) and isinstance(data['description'], str) and isinstance(data['params'], dict):\n return True\n else:\n return False", "def update_response_json(user, data):\n if type(data) == dict and type(user) == str:\n if user in data.keys():\n data = data[user]\n response_json.response[user] = data\n return response_json.response\n return {'update': 'failed',\n 'reason': 'data provided incorrectly formatted. \\\n the data must be json formatted, and <user> must be of type string'}", "def check_if_nested(data):\n if isinstance(data, dict):\n for k in data:\n if isinstance(data[k], (list, dict)):\n return True\n elif isinstance(data, list):\n for i in data:\n if isinstance(i, (list, dict)):\n return True\n return False", "def verifyDictTypes( template, dictToCheck ):\n for key in dictToCheck:\n if not ( ( isinstance( dictToCheck[ key ], list ) and\n isinstance( template[ key ], list ) ) or\n ( isinstance( dictToCheck[ key ], dict ) and\n isinstance( template[ key ], dict ) ) or\n ( isinstance( dictToCheck[ key ], template[ key ] ) ) ):\n return False\n\n return True", "def _is_key_value_array(self, data):\n for d in data:\n if not self._is_key_value(d):\n return False\n return True", "def __is_key_in_json(self, key=str, json_dict=json):\n if key in json_dict:\n # noinspection PyUnresolvedReferences\n return json_dict[key]\n else:\n return self.NO_KEY_VALUE_FOR_ENTRY", "def validate_dict(data_dict, entity):\r\n fields = []\r\n for key, value in data_dict.items():\r\n if not value:\r\n fields.append(key)\r\n continue\r\n if len(fields) > 0:\r\n return provide_field_value(entity, fields)\r\n elif key == hqAddKey:\r\n status = validate_hqadd(value)\r\n if not status == ok_str:\r\n return status\r\n elif key == logoUrlKey:\r\n status = validate_logourl(value)\r\n if not status == ok_str:\r\n return status\r\n elif key == type_key:\r\n status = validate_officeType(value)\r\n if not status == ok_str:\r\n return status\r\n elif key == name_key:\r\n status = None\r\n if entity == party_key:\r\n status = validate_partyname(value)\r\n elif entity == office_key:\r\n status = validate_officeName(value)\r\n if not status == ok_str:\r\n return status\r\n if fields:\r\n return provide_field_value(entity, fields)\r\n return ok_str", "def is_dictionary(obj):\n return type(obj) is dict", "def is_dict(obj):\n return type(obj) == type({})", "def 
assertUser(self, user): # camel case to be consistent with unittest\n self.assertIsInstance(user, dict)\n self.assertIn('id', user)\n self.assertIn('nickname', user)", "def _validate_dict_entry(self, dict_entry):\r\n try:\r\n # Type-check all of the type-critical items.\r\n if (\r\n type(dict_entry[\"id\"]) == int and\r\n type(dict_entry[\"date\"]) == datetime.date and\r\n type(dict_entry[\"time\"]) == datetime.time and\r\n type(dict_entry[\"datetime\"]) == datetime.datetime and\r\n type(dict_entry[\"duration\"]) == datetime.timedelta):\r\n return True\r\n else:\r\n return False\r\n # end if\r\n except Exception as err:\r\n _z_exc(\"logentry.py/_validate_dict_entry\", err)\r\n # end try\r", "def _verify_dict_field(self, _dict, name, types):\n if type(types) != list:\n types = [types]\n if str in types and unicode not in types:\n types.append(unicode)\n if unicode in types and str not in types:\n types.append(str)\n self.assertTrue(name in _dict, msg=\"Missing field '%s'\" % name)\n self.assertTrue(type(_dict[name]) in types,\n msg=\"Erroneous type of the field '%s': \"\n \"found %s, expected any of %s\" % (\n name, str(type(_dict[name])), \",\".join([str(x) for x in types])))", "def validate(self, data: Dict):\n for key in self.__dict__.keys():\n if not key.startswith('__') and key != 'id':\n if data[key] == '' or data[key] is None:\n raise ValidationError(\n message=f'{key} should not be \"{data[key]}\"'\n )", "def clean_dict(data: dict) -> None:\n if not isinstance(data, dict):\n logger.warning(f\"Not a dictionary: {type(data)}\")\n return\n\n data.pop(\"_id\", None)\n data.pop(\"password\", None)\n\n return", "def test_good_data(self):\n self.assertNotEqual(self.request.user.email, self.good_dict['email'])\n self.request.json_body = deepcopy(self.good_dict)\n result = user_id_put_view(self.request)['d']\n self.assertEqual(result, dict_from_row(self.request.user, remove_fields=removals))\n self.assertEqual(self.request.user.email, self.good_dict['email'])", "def test_json_representation_of_user(new_user):\n user, user_data = new_user\n user_json = user.json()\n\n acceptable_keys = 'id', 'username', 'created_at', 'is_admin'\n unacceptable_keys = 'salt', 'password', 'hashed_password'\n\n assert_that(user_json).contains_value(user_data.get('username'))\n assert_that(user_json).contains_key(*acceptable_keys)\n assert_that(user_json).does_not_contain_key(*unacceptable_keys)", "def match(self, data_instance: Dict[str, Any]) -> bool:", "def _check_fields(self, content: JsonDict) -> None:\n self.assertIn(\"id\", content)\n self.assertIn(\"received_ts\", content)\n self.assertIn(\"room_id\", content)\n self.assertIn(\"event_id\", content)\n self.assertIn(\"user_id\", content)\n self.assertIn(\"sender\", content)\n self.assertIn(\"canonical_alias\", content)\n self.assertIn(\"name\", content)\n self.assertIn(\"event_json\", content)\n self.assertIn(\"score\", content)\n self.assertIn(\"reason\", content)\n self.assertIn(\"auth_events\", content[\"event_json\"])\n self.assertIn(\"type\", content[\"event_json\"])\n self.assertIn(\"room_id\", content[\"event_json\"])\n self.assertIn(\"sender\", content[\"event_json\"])\n self.assertIn(\"content\", content[\"event_json\"])", "def isJWE_unserialized_single(x):\n if isinstance(x, dict) \\\n and (\"unprotected\" in x or \"protected\" in x) \\\n and (\"ciphertext\" in x):\n try:\n if \"protected\" in x:\n json.loads(x[\"protected\"])\n return True\n except:\n return False\n else:\n return False", "def has_value(value):\n return 
IsDictContainingValue(wrap_matcher(value))", "def validate_json(data: dict) -> bool:\n try:\n assert \"data\" in data.keys()\n assert isinstance(data[\"data\"], str)\n assert \"command\" in data.keys()\n assert isinstance(data[\"command\"], str)\n assert \"time\" in data.keys()\n assert isinstance(data[\"time\"], str)\n assert \"origin\" in data.keys()\n assert isinstance(data[\"origin\"], str)\n return True\n except AssertionError:\n return False", "def verify_object(self, data):\n rv = self.get(data[self.id_field])\n result = not is_404(rv)\n if result:\n for key, value in data:\n if not in_response(rv, value):\n return False\n return result", "def is_data_response(resp: Response, data_field: str = None) -> bool:\n\n return \\\n resp and \\\n is_dict(resp) and \\\n is_success_response(resp) and \\\n resp.get(\"data\") is not None and \\\n (not data_field or data_field in resp.get(\"data\"))", "def _import(self, data):\n if isinstance(data, dict):\n if len(data):\n for key in data:\n if data.get(key) is not None:\n if not self.set(key, data.get(key)):\n raise Exception('%s %s icin dogru bir veri degil.' % (data.get(key), key))", "def checkOpsimData(self, otherdata=None):\n data2check = self.opsim_data\n if otherdata:\n data2check = otherdata\n if type(data2check) != list:\n if type(data2check) == dict:\n if set(self._opsim_keys).issubset(set(data2check.keys())):\n return True, 'good'\n else:\n return False, 'Data is not complete'\n else:\n return False, 'Data is not a list nor a dictionary'\n else:\n nodict = 0\n baddict = 0\n for data in data2check:\n if type(data) != dict:\n nodict += 1\n else:\n if not set(self._opsim_keys).issubset(set(data.keys())):\n baddict += 1\n if (nodict == 0 and baddict == 0):\n return True, 'good'\n else:\n statement = '{0} non dictionary element(s) and {1} uncomplete\\\n dictionary(ies) in data list'.format(nodict, baddict)\n return (False, statement)", "def parse_user_dict(self, line):\n pass", "def isJWS_unserialized_single(x):\n if isinstance(x, dict) \\\n and \"payload\" in x and \"signature\" in x \\\n and (\"protected\" in x or \"unprotected\" in x):\n try: \n if \"protected\" in x: \n json.loads(x[\"protected\"])\n return True\n except:\n return False\n else: \n return False", "def handle_input(data: dict):", "def assertContainsDict(self, dictionary, data):\n for key in dictionary:\n self.assertTrue(key in data, msg=\"Data doesn't have key '{}'\".format(key))\n value = dictionary[key]\n value2 = data[key]\n self.assertEqual(value, value2,\n msg=\"key={}, value={} != target={}\".format(key, value, value2))", "def rule_00_config_is_dict(session):\n return isinstance(session[\"config\"], dict)", "def test_input_text_returned_in_response_data(self):\n user_name = \"Ron Obvious\"\n user_input = \"Hello!\"\n\n data = self.chatbot.get_response_data(user_name, user_input)\n\n self.assertIn(user_input, data[\"user\"].keys())", "def has_dict_keys(self, dict_in_pointer):\n if type(dict_in_pointer)==str:\n dict_in_pointer = [dict_in_pointer]\n if type(dict_in_pointer)==dict:\n dict_in_pointer = dict_in_pointer.keys()\n if type(dict_in_pointer)!=list:\n return None\n start = self.head\n while start:\n if dict_in_pointer==start.getMember():\n return start\n start = start.getLink()\n return None", "def missing_data(self, data):\n missing_fields = []\n for key in data:\n if not key in request.json:\n missing_fields.append(key)\n if missing_fields:\n message = 'Missing ' + ', '.join(missing_fields)\n return self.bad_request(message)\n return None", "def hasCustomData( 
self, key ):\n return str(key) in self._customData", "def _dict_sanity_check(data, mandatory_keys, optional_keys, obj=None):\n # Both mandatory and optional key lists contain tuples, with key name being the first\n # element in each tuple. The validator is not needed for the mandatory / optional key\n # check, so we can ignore it here.\n missing_keys = [k for k, _ in mandatory_keys if k not in data]\n if missing_keys:\n raise ValueError(f\"missing mandatory key(s): {', '.join(missing_keys)}\")\n\n # Create a lookup of key name to validator function. The lookup will have an entry for all\n # possible keys, so we can also use it to check if we have any invalid keys.\n keys_to_validators = dict(mandatory_keys + optional_keys)\n always_allowed = [\"id\", \"_created\", \"_updated\", \"_links\", \"_embedded\"]\n invalid_keys = [k for k in data\n if k not in keys_to_validators and k not in always_allowed]\n if invalid_keys:\n raise ValueError(f\"invalid key(s) in request body: {', '.join(invalid_keys)}\")\n\n # Now we individually call the validators on all values in the data, producing a useful\n # error message if possible\n res = {}\n for key, value in data.items():\n try:\n res[key] = keys_to_validators[key](value)\n except ValueError as ex:\n raise ValueError(f\"key '{key}': {str(ex)}\")\n\n now_time_string = datetime.datetime.now().isoformat()\n if obj is None:\n res['_created'] = now_time_string\n res['_updated'] = now_time_string\n return res", "def test_user_info(user_keys):\n\n pyme_instance = PyMe(206946886)\n response = pyme_instance.display()\n\n assert isinstance(response, dict)\n assert response['id'] == 206946886, \"The ID of the user should be in the response\"\n assert set(user_keys).issubset(response.keys()), \"All keys should be in the response\"", "def everything_is_unicode(d: dict) -> bool:\n for k, v in d.items():\n if isinstance(v, dict) and k != \"headers\":\n if not everything_is_unicode(v):\n return False\n elif isinstance(v, list):\n for i in v:\n if isinstance(i, dict) and not everything_is_unicode(i):\n return False\n elif isinstance(i, bytes):\n return False\n elif isinstance(v, bytes):\n return False\n return True", "def isdictinstance(obj):\n return isinstance(obj, dict) or isinstance(obj, DotDict)", "def test_metadata_fonts_items_dicts(self):\n for x in self.metadata.get('fonts', None):\n self.assertEqual(type(x), type({}), msg=\"type(%s) is not dict\" % x)", "def _check_user_entry(user):\n if \"tenant_name\" in user:\n keys = set(user.keys())\n if keys == {\"username\", \"password\", \"tenant_name\",\n \"project_domain_name\", \"user_domain_name\"}:\n if (user[\"user_domain_name\"] == \"\"\n and user[\"project_domain_name\"] == \"\"):\n # it is credentials of keystone v2 and they were created\n # --fromenv\n del user[\"user_domain_name\"]\n del user[\"project_domain_name\"]\n return True\n else:\n # it looks like keystone v3 credentials\n user[\"project_name\"] = user.pop(\"tenant_name\")\n return True", "def test_dict(self, dictionary: dict) -> None:\r\n if not isinstance(dictionary, dict):\r\n raise ValueError(f'Expected dictionary, but received {type(dictionary)}')\r\n for key, value in dictionary.items():\r\n conditional_check(key, self.case_check, self.ignored_keys)\r\n if isinstance(value, dict):\r\n self.test_dict(dictionary=value)\r\n elif isinstance(value, list):\r\n self.test_list(items=value)", "def validate_transaction_dict(request_dict):\n if 'id' not in request_dict:\n return False\n if 'user_id' not in request_dict:\n return False\n if 'amount' 
not in request_dict:\n return False\n if 'date' not in request_dict:\n return False\n return True", "def test_process_dict_false(self):\n\n self.assertNotIn('userB@domain', self.temp_set)", "def validate_dto(cls, data: dict) -> bool:\n\n required_keys = {'signature', 'signer'}\n return (\n cls.validate_dto_required(data, required_keys)\n and cls.validate_dto_all(data, required_keys)\n )", "def isdict(val: Any) -> bool:\n return isinstance(val, MutableMapping)", "def is_sync_service_data(values):\n if not isinstance(values, dict):\n return False\n\n for acr, users in values.items():\n if not isinstance(acr, (str, unicode)) or not isinstance(users, list):\n return False\n\n return True", "def get_if_exist(self, data, key):\n if key in data:\n return data[key]\n return None", "def _check_keys(self, dict, filetype):\n self.filetype = filetype\n for key in dict:\n if isinstance(dict[key], scipy.io.matlab.mio5_params.mat_struct):\n dict[key] = self._todict(dict[key], self.filetype)\n return dict", "def check_dict(dic, validator, messages):\n check_dict_alg(dic, validator, [], messages, validator, \"NoObject\")", "def isJWP_unserialized(x):\n if isinstance(x, dict) \\\n and \"payload\" in x and isinstance(x[\"unprotected\"], dict)\\\n and \"unprotected\" in x and \"signature\" not in x:\n return True\n else:\n return False", "def __getitem__(self, key):\n return self._user_data.get(key)", "def test_dict(self, testdata: TestData) -> None:\n for data in testdata['observation_type']:\n observation_type = ObservationType.from_dict(data)\n assert data == observation_type.to_dict()", "def _check_keys(dict):\n for key in dict.keys():\n if isinstance(dict[key], sio.matlab.mio5_params.mat_struct):\n dict[key] = _todict(dict[key])\n return dict", "def check_data(cls, database, data):\n if not data:\n abort(204, 'No data received')\n entity = {}\n try:\n entity = loads(data)\n except:\n print \"erreur chargement json %s\" % (data,)\n abort(404, 'Error on loading data')\n mapper = inspect(database)\n orm_data = {}\n for column in mapper.attrs:\n if isinstance(column, ColumnProperty) and \\\n not column.columns[0].nullable and \\\n not column.columns[0].foreign_keys and \\\n not column.columns[0].primary_key:\n orm_data[column.key] = column.columns[0].nullable\n else:\n pass\n for field in orm_data.iterkeys():\n if entity.has_key(field):\n #del(orm_data[column])\n pass\n else:\n return False\n return entity", "def validate(self, data):\n # if data['is_private'] and data['contestants']:\n # raise serializers.ValidationError(\"Can not be private and compete for an award.\")\n return data", "def _normalize(empty, user_data):\n if isinstance(user_data, UserData):\n return user_data.to_dict()\n if isinstance(user_data, dict):\n return _dict_to_dict(empty, user_data)\n if isinstance(user_data, (bytes, bytearray)):\n return _bytes_to_dict(empty, user_data)\n if user_data is None:\n return empty\n raise ValueError", "def dict_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (dict, collections.UserDict, collections.abc.MutableMapping)):\n name = type(var).__name__\n raise DictError(\n 'Function {} expected dict, {} got instead.'.format(func, name))", "def is_item_in_the_response(key, value, jsonResponse):\n flag = False\n for item in jsonResponse:\n if type(jsonResponse[item]) == int:\n if item == key and jsonResponse[item] == int(value):\n flag = True\n\n if type(jsonResponse[item]) == str:\n if item == key and jsonResponse[item] == str(value):\n flag = 
True\n\n if type(jsonResponse[item]) == bool:\n if item == key and jsonResponse[item] == bool(value):\n flag = True\n else:\n #log and error\n pass\n return flag", "def _check_data_type(self, key: str, value: Any):\n allowedDataType = from_dot_notation(\n field=\".\".join([*self.parents, key]), obj=self.definition\n ).get(\"allowedDataType\", None)\n if allowedDataType is not None and not isinstance(value, allowedDataType):\n raise Exception(\n f\"Value '{value}' is not of the correct type. The allowed data type is: {allowedDataType.__name__}\"\n )", "def test_postive_get_auth_horizon_check_keys(self):\n r = self.res.get('/auth/config/'+utils.partner, headers=utils.headers)\n logging.info(\"Return response is '%s'\" % r)\n # convert string to dictionary\n rd = ast.literal_eval(r)\n logging.info(\"Return response in dictionary format is '%s'\" % rd)\n self.assertEqual(self.res.response.status, 200)\n keys = ['type', 'web_endpoint', 'client_endpoint', 'org_name']\n self.assertTrue(utils.is_same_array(keys, rd.keys()), \"Keys are not correct!\")", "def has_dict(self, dict_in_pointer):\n if type(dict_in_pointer)!=dict:\n return None\n start = self.head\n while start:\n if dict_in_pointer==start.getMember():\n return start\n start = start.getLink()\n return None", "def check_dictionary(self, dico):\n if dico is not None:\n self.log.info('Check the dictionary')\n test, aff = validate(dico, proto_domain, test_comp = False)\n if test:\n self.log.info(aff)\n else:\n self.log.error(aff)\n sys.exit()", "def test_obj_dict(self):\n obj = storage.all()\n self.assertIsInstance(obj, dict)", "def test_export_data_none(self) -> None:\n user_data = user_models.ExpUserLastPlaythroughModel.export_data(\n self.NONEXISTENT_USER_ID)\n expected_data: Dict[str, Dict[str, str]] = {}\n self.assertEqual(expected_data, user_data)", "def validate(self, data):\n password1 = data.get('password1')\n password2 = data.get('password2')\n\n if password1 is None:\n raise serializers.ValidationError(\"KeyError: password1 does not exist.\")\n\n if password2 is None:\n raise serializers.ValidationError(\"KeyError: password2 does not exist\")\n\n if password1 != password2:\n raise serializers.ValidationError(\"The two password fields didn't match.\")\n return data", "def _is_valid_dict(content_type: str) -> bool:\n content_type = content_type.strip()\n\n if not content_type.startswith(\"pt:dict\"):\n return False\n\n if not _has_matched_brackets(content_type):\n return False\n\n if not _has_brackets(content_type):\n return False\n\n sub_types = _get_sub_types_of_compositional_types(content_type)\n if len(sub_types) != 2:\n return False\n\n sub_type_1 = sub_types[0]\n sub_type_2 = sub_types[1]\n return _is_valid_pt(sub_type_1) and _is_valid_pt(sub_type_2)", "def _get_user_data(self):\n return {\"key\": self._key}", "def get_user_data(self, key, default=None):\n if not isinstance(self._user_data, dict):\n return default\n return self._user_data.get(key)", "def has_data(self, *args, **kwargs):\n return False", "def test_dict(self, obj: dict) -> None:\r\n properties = read_properties(obj)\r\n for key, value in properties.items():\r\n conditional_check(key, self.case_check, self.ignored_keys)\r\n if read_type(value) == 'object':\r\n logger.debug('dict -> dict')\r\n self.test_dict(obj=value)\r\n elif read_type(value) == 'array':\r\n logger.debug('dict -> list')\r\n self.test_list(array=value)", "def validate_data(self, data):\n try:\n event_id = data[\"event\"]\n guests = data[\"guests\"]\n section = data.get(\"section\", None)\n except 
KeyError:\n return False, \"\", \"\", \"\"\n try:\n event = Event.objects.get(pk=event_id)\n except Event.DoesNotExist:\n return False, \"\", \"\", \"\"\n if not isinstance(guests, list):\n return False, \"\", \"\", \"\"\n if section is not None:\n try:\n event.venue.sections.get(pk=section)\n except Section.DoesNotExist:\n return False, \"\", \"\", \"\"\n return True, event, guests, section", "def has(self, key):\n return self.data.get(key, None) is not None", "def check_key(request):\n try:\n access_key = request.session.get('access_key_tw', None)\n if not access_key:\n return False\n except KeyError:\n return False\n return True\n\n\t# User info", "def check_fields_updated_correctly(old_user_data: user_models.User,\n updated_data_json: Dict[str, Any]) -> bool:\n\n updated_user_object = get_user_data_from_user_model(old_user_data)\n updated_user_data_dict = updated_user_object.dict()\n\n # basically just getting intersetion of keys from two dicts\n keys_to_compare = set(updated_user_data_dict).intersection(\n set(updated_data_json))\n\n try:\n is_password = lambda x: x == \"password\"\n for key in keys_to_compare:\n\n if is_password(key):\n password_str = updated_data_json[key]\n assert updated_user_object.check_password(password_str)\n continue\n\n assert updated_data_json[key] == updated_user_data_dict[key]\n return True\n except AssertionError as assert_error:\n logging.warning(f\"failed at: {assert_error}\")\n return False" ]
[ "0.719985", "0.7077056", "0.6867152", "0.6618034", "0.6470574", "0.63971496", "0.63777405", "0.63742024", "0.63429755", "0.63425326", "0.62384546", "0.6238243", "0.61970216", "0.6187313", "0.6175904", "0.61512995", "0.6139445", "0.6129756", "0.610137", "0.6098412", "0.6043313", "0.60295683", "0.6013052", "0.597079", "0.5963954", "0.5947398", "0.5927177", "0.5924112", "0.59203804", "0.58964926", "0.5890138", "0.58770454", "0.586571", "0.58592165", "0.58560395", "0.5853215", "0.584921", "0.58411944", "0.5820981", "0.57644534", "0.57629895", "0.5761087", "0.57549345", "0.57439667", "0.5742135", "0.5731169", "0.57243556", "0.5707997", "0.570401", "0.57017344", "0.569077", "0.568537", "0.56817365", "0.5668661", "0.5666086", "0.566288", "0.56624734", "0.566032", "0.56579936", "0.56528544", "0.56448936", "0.5630685", "0.562969", "0.56278855", "0.5621436", "0.5618712", "0.5608061", "0.5603161", "0.55998206", "0.55945784", "0.5574706", "0.55725884", "0.5555796", "0.55363643", "0.55334955", "0.552803", "0.5524321", "0.5518366", "0.5518122", "0.55135566", "0.55099106", "0.5499479", "0.5495527", "0.54948694", "0.54825133", "0.5481502", "0.5478038", "0.5475902", "0.54735476", "0.5469913", "0.54618895", "0.54563254", "0.54368514", "0.5412439", "0.54049957", "0.53934026", "0.53924656", "0.5387306", "0.5386386", "0.53858083" ]
0.75419945
0
Return key from user_data if it's a dict
def get_user_data(self, key, default=None):
    if not isinstance(self._user_data, dict):
        return default
    return self._user_data.get(key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __getitem__(self, key):\n return self._user_data.get(key)", "def _get_user_data(self):\n return {\"key\": self._key}", "def get(self, key):\n return self._user_data.get(key)", "def _get_key(self, object_type, user_key = None):\n\t\tif not user_key and not self.object_type_keys.has_key(object_type):\n\t\t\traise ParserError(\"Unknown key for object type: %s\\n\" % object_type)\n\n\t\t## Use a default key\n\t\tif not user_key:\n\t\t\tuser_key = self.object_type_keys[object_type]\n\n\t\treturn user_key", "def get_key_from_data_dict(data: dict, key: str):\n retrieved_key = data.get(key, None)\n if not retrieved_key:\n LOG.info(\n f\"Could not get key {key} from request to the API. Data received: {data}\"\n )\n return retrieved_key", "def key_data(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key_data\")", "def get_key(self, user):\r\n from delicious_cake.models import ApiKey\r\n\r\n try:\r\n key = ApiKey.objects.get(user=user)\r\n except ApiKey.DoesNotExist:\r\n return False\r\n\r\n return key.key", "def get_if_exist(self, data, key):\n if key in data:\n return data[key]\n return None", "def __is_key_in_json(self, key=str, json_dict=json):\n if key in json_dict:\n # noinspection PyUnresolvedReferences\n return json_dict[key]\n else:\n return self.NO_KEY_VALUE_FOR_ENTRY", "def data_key(self):\n raise NotImplementedError", "def datakey(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"datakey\")", "def _get_user_id(self, user: Optional[Dict[str, Any]]) -> Optional[str]:\n return user[\"id\"] if user and \"id\" in user else None", "def has_user_data(self, key):\n return isinstance(self._user_data, dict) and key in self._user_data", "def fetch(self, key: object, default=None):\n return self._user_data.get(key, default)", "def get_key(dictionary: dict, *args) -> Union[str, bool, dict]:\n data = reduce(lambda c, k: c.get(k, {}), args, dictionary)\n if data == {}:\n return \"\"\n return data", "def key(self):\n if \"key\" in self._prop_dict:\n return self._prop_dict[\"key\"]\n else:\n return None", "def __getitem__(self, key):\n return self.data.get(key, '')", "def getType(self):\n if self.use_dic:\n data = self.dic.keys()[0]\n act = self.dic[data].keys()[0]\n return self.dic[data][act].keys()[0]\n else:\n return None", "def key_data(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key_data\")", "def key_data(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key_data\")", "def user_data(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_data\")", "def user_data(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_data\")", "def extract_backing_type(value: dict) -> str:\n return next(iter(value.keys()))", "def get_key(self, item):\r\n return item[0]", "def _get_user_id(user_data: dict):\n data = json.dumps(user_data).encode('utf-8')\n hashed_data = hashlib.sha256()\n hashed_data.update(data)\n return hashed_data.hexdigest()", "def get_userdata(self, nick, datakey):\n nickkey = irc.strings.lower(nick)\n data = None\n if nickkey in self.users and datakey in self.users[nickkey]:\n data = self.users[nickkey][datakey]\n return data", "def meta_value(request_object, dictkey):\n \n try:\n val = request_object.META[dictkey]\n except: # Exception as ex:\n val = ''\n return val", "def getUserFromKey(key):\n\t\t#get(Key(key))\n\t\t#return None if no user found", "def check_user_type(self): # FIXME Buggy as screen_name can be int\n try:\n int(self.user)\n return {'user_id': self.user}\n except ValueError:\n return {'screen_name': 
self.user}", "def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")", "def extract_user_short(data):\n user_pk = data.get(\"id\", data.get(\"pk\"))\n assert user_pk, 'User without pk \"%s\"' % data\n return {\n \"pk\": int(user_pk),\n \"username\": data[\"username\"],\n \"full_name\": data[\"full_name\"],\n \"is_private\": data.get(\"is_private\"),\n \"profile_pic_url\": data[\"profile_pic_url\"],\n \"is_verified\": data.get(\"is_verified\"),\n # \"is_unpublished\": data.get(\"is_unpublished\"),\n }", "def getKey(instance):\n return instance['name']", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def get_value(self) -> Dict[str, any]:", "def __parse_user_id(self, data):\n if 
'user_id' in data:\n return data['user_id']\n if 'user' in data:\n return data['user']['id']\n return None", "def key(self):\n return self.value()._key", "def getRetKey(dictionary):\n retKey = \"\"\n try:\n if dictionary:\n retKey = dictionary.values()[0].keys()[0]\n except TypeError:\n logging.debug(\"type error\")\n\n return retKey", "def key(key):\n return key", "def user_id(self):\n return json_loads(self.user_json).get('id')", "def __getitem__(self, key):\r\n if key == 'name':\r\n return self.name\r\n elif key == 'type':\r\n return self.type\r\n elif key == 'tab_id':\r\n return self.tab_id\r\n else:\r\n raise KeyError('Key {0} not present in tab {1}'.format(key, self.to_json()))", "def make_external_key(self, data):\n return data['key']", "def key_object_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key_object_name\")", "def get_key(self, user, api_key):\n return True", "def object_key(self) -> str:\n return self._values.get('object_key')", "def get_user_info_by_id(self, user_id: int) -> dict:", "def check_for_dict(check):", "def _access_dict(self, d, key):\n try:\n # try to get access to the value by using the key\n value = d[key]\n return value\n except:\n # fail to access the value from the key\n # namely, the feature does not exist in the \n # feature dictionary of a specific apartment\n return None", "def key_type(self) -> global___Type:", "def key(self) -> \"str\":\n return self._attrs.get(\"key\")", "def _get_key(name, obj):\r\n wanted_key = 'data'\r\n if isinstance(obj, h5py.Group):\r\n for key, val in list(obj.items()):\r\n if key == wanted_key:\r\n if isinstance(obj[key], h5py.Dataset):\r\n key_path = obj.name + \"/\" + key\r\n return key_path", "def __getattr__(self, key):\n if key == 'user_data':\n if key in self.data:\n return self.data[key]\n api = ApiClient()\n user_json = api.rest(self.url + '/user_data.json')\n self.user_data = UserData(json.loads(user_json), self.url)\n return self.user_data\n\n return super(Template, self).__getattr__(key)", "def has_dict_keys(self, dict_in_pointer):\n if type(dict_in_pointer)==str:\n dict_in_pointer = [dict_in_pointer]\n if type(dict_in_pointer)==dict:\n dict_in_pointer = dict_in_pointer.keys()\n if type(dict_in_pointer)!=list:\n return None\n start = self.head\n while start:\n if dict_in_pointer==start.getMember():\n return start\n start = start.getLink()\n return None", "def get_in(self, key=None, default=None):\n if key is None:\n raise KeyError(\"'Dict' attribute key can't be empty\")\n key_list = key.strip().split('.')\n data = self\n size = len(key_list)\n for index, k in enumerate(key_list):\n data = data.get(k)\n if index < size-1 and not isinstance(data, dict):\n return default\n return data", "def to_internal_value(self, data):\n try:\n if not isinstance(data, dict) or \"id\" not in data:\n raise TypeError\n return super().to_internal_value(data[\"id\"])\n except (TypeError, ValueError):\n self.fail(\"incorrect_type\", data_type=type(data).__name__)", "def __getitem__(self, key):\n if isinstance(key, str):\n phone = EntityDatabase.parse_phone(key)\n if phone:\n return self._phone_id[phone]\n else:\n key = key.lstrip('@').lower()\n return self._entities[self._username_id[key]]\n\n if isinstance(key, int):\n return self._entities[key] # normal IDs are assumed users\n\n if isinstance(key, TLObject):\n sc = type(key).SUBCLASS_OF_ID\n if sc == 0x2d45687:\n # Subclass of \"Peer\"\n return self._entities[utils.get_peer_id(key, add_mark=True)]\n elif sc in {0x2da17977, 0xc5af5d94, 0x6d44b7db}:\n # 
Subclass of \"User\", \"Chat\" or \"Channel\"\n return key\n\n raise KeyError(key)", "def user_display_name(self):\n return self.key.id()", "def _convert_dict_key(self, string):\r\n try:\r\n # Don't do any of this if the string is empty or None.\r\n if string is None:\r\n return None\r\n # If the string represents a container, convert it.\r\n elif string[0] in [\"(\", \"[\", \"{\"]:\r\n string = (\r\n str_utils.str_to_container(string))\r\n # Try other type conversions. Any that fail will leave the\r\n # variable untouched.\r\n else:\r\n string = str_utils.str_to_num(string)\r\n string = (\r\n str_utils.str_to_datetime(string))\r\n string = str_utils.str_to_bool(string)\r\n # end if\r\n return string\r\n except Exception as err:\r\n _z_exc(\"logentry.py/convert_dict_key\", err)\r\n # end try\r", "def get_metadata(resource, key, default_value):\n if ('object' in resource and 'user_metadata' in resource['object'] and\n key in resource['object']['user_metadata']):\n return resource['object']['user_metadata'][key]\n return default_value", "def get_key(self):\n return self._determine_key()", "def username(self):\n return json_loads(self.user_json).get('username')", "def get(self, key: t.Hashable) -> t.Any:", "def get_key(self, role):\n\n for key, role_name in self.assignable_roles[0].items():\n if role_name == role.name:\n return key" ]
[ "0.6847286", "0.65308297", "0.6454429", "0.63964266", "0.6262093", "0.618459", "0.61062187", "0.60399806", "0.59043604", "0.5890058", "0.5878438", "0.5858267", "0.5846593", "0.58453906", "0.5799683", "0.57730305", "0.57642144", "0.57528245", "0.5742863", "0.5742863", "0.5728808", "0.5728808", "0.5722891", "0.5696669", "0.5694399", "0.5691886", "0.56825244", "0.5680933", "0.56729406", "0.567122", "0.567122", "0.567122", "0.567122", "0.567122", "0.567122", "0.567122", "0.567122", "0.567122", "0.567122", "0.567122", "0.567122", "0.56624836", "0.56175345", "0.56112355", "0.56112355", "0.56112355", "0.56112355", "0.56112355", "0.56112355", "0.56112355", "0.56112355", "0.56112355", "0.56112355", "0.56112355", "0.56112355", "0.56112355", "0.56112355", "0.56112355", "0.56112355", "0.56112355", "0.56112355", "0.56112355", "0.56112355", "0.56112355", "0.56112355", "0.56112355", "0.56112355", "0.56112355", "0.56112355", "0.56112355", "0.56112355", "0.5550921", "0.55208933", "0.5504325", "0.54826486", "0.5480856", "0.5476723", "0.540954", "0.5397665", "0.5390716", "0.5384224", "0.53750604", "0.5365029", "0.535012", "0.5334877", "0.53298384", "0.5317377", "0.5306698", "0.528563", "0.5279906", "0.52794", "0.527539", "0.5269111", "0.5260414", "0.52578866", "0.5255594", "0.52532315", "0.52374935", "0.5231247", "0.5228284" ]
0.6621438
1